author:    Herbert Xu <herbert@gondor.apana.org.au>  2013-09-07 04:53:35 +0200
committer: Herbert Xu <herbert@gondor.apana.org.au>  2013-09-07 04:53:35 +0200
commit:    eeca9fad52fc4bfdf42c38bfcf383e932eb3e9d6
tree:      cc51c880459d41c0e8d7576405bef4c987bc7aa0 /drivers/net/ethernet
parent:    hwrng: via - Add MODULE_DEVICE_TABLE
parent:    Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Merge upstream tree in order to reinstate crct10dif.
Diffstat (limited to 'drivers/net/ethernet'):
 300 files changed, 16384 insertions(+), 4260 deletions(-)
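A pattern that recurs throughout the hunks below is the restructuring of probe-function error handling into a single goto-based unwind ladder (see the 3c59x vortex_init_one() change), alongside the replacement of hand-rolled module init/exit pairs with module_pci_driver() in acenic and amd8111e. The following is a minimal sketch of the unwind style, assuming hypothetical mydev_* names; it is not code from this commit:

```c
/* Sketch of the goto-unwind style the 3c59x hunk adopts; mydev_probe()
 * and mydev_setup() are hypothetical stand-ins, not kernel APIs. */
#include <linux/module.h>
#include <linux/pci.h>

static int mydev_setup(struct pci_dev *pdev, void __iomem *ioaddr)
{
	return 0;	/* device-specific init would live here */
}

static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_regions(pdev, "mydev");
	if (rc < 0)
		goto out_disable;

	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr) {
		rc = -ENOMEM;
		goto out_release;
	}

	rc = mydev_setup(pdev, ioaddr);
	if (rc < 0)
		goto out_iounmap;

	return 0;

	/* each label undoes exactly the steps that succeeded before it */
out_iounmap:
	pci_iounmap(pdev, ioaddr);
out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
out:
	return rc;
}
```

The point of the ladder is that every failure path runs the cleanup steps in exact reverse order of acquisition, so the per-error duplication the old vortex_init_one() carried disappears.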
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index adb4bf5eb4b4..ede8daa68275 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -723,25 +723,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev) pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", dev->name, skb->len, inw(ioaddr + EL3_STATUS)); } -#if 0 -#ifndef final_version - { /* Error-checking code, delete someday. */ - ushort status = inw(ioaddr + EL3_STATUS); - if (status & 0x0001 && /* IRQ line active, missed one. */ - inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */ - pr_debug("%s: Missed interrupt, status then %04x now %04x" - " Tx %2.2x Rx %4.4x.\n", dev->name, status, - inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS), - inw(ioaddr + RX_STATUS)); - /* Fake interrupt trigger by masking, acknowledge interrupts. */ - outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); - } - } -#endif -#endif /* * We lock the driver against other processors. Note * we don't need to lock versus the IRQ as we suspended diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 072c6f14e8fc..ad5272b348f0 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1012,10 +1012,8 @@ static int vortex_init_one(struct pci_dev *pdev, goto out; rc = pci_request_regions(pdev, DRV_NAME); - if (rc < 0) { - pci_disable_device(pdev); - goto out; - } + if (rc < 0) + goto out_disable; unit = vortex_cards_found; @@ -1032,23 +1030,24 @@ static int vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { - pci_release_regions(pdev); - pci_disable_device(pdev); rc = -ENOMEM; - goto out; + goto out_release; } rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, ent->driver_data, unit); - if (rc < 0) { - pci_iounmap(pdev, ioaddr); - pci_release_regions(pdev); - pci_disable_device(pdev); - goto out; - } + if (rc < 0) + goto out_iounmap; vortex_cards_found++; + goto out; +out_iounmap: + pci_iounmap(pdev, ioaddr); +out_release: + pci_release_regions(pdev); +out_disable: + pci_disable_device(pdev); out: return rc; } @@ -1473,7 +1472,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, if (pdev) { vp->pm_state_valid = 1; - pci_save_state(VORTEX_PCI(vp)); + pci_save_state(pdev); acpi_set_WOL(dev); } retval = register_netdev(dev); @@ -3233,21 +3232,20 @@ static void vortex_remove_one(struct pci_dev *pdev) vp = netdev_priv(dev); if (vp->cb_fn_base) - pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); + pci_iounmap(pdev, vp->cb_fn_base); unregister_netdev(dev); - if (VORTEX_PCI(vp)) { - pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ - if (vp->pm_state_valid) - pci_restore_state(VORTEX_PCI(vp)); - pci_disable_device(VORTEX_PCI(vp)); - } + pci_set_power_state(pdev, PCI_D0); /* Go active */ + if (vp->pm_state_valid) + pci_restore_state(pdev); + pci_disable_device(pdev); + /* Should really use issue_and_wait() here */ iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 
0x04 : 0x14), vp->ioaddr + EL3_CMD); - pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); + pci_iounmap(pdev, vp->ioaddr); pci_free_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index 1c71c763f680..f00c76377b44 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -67,7 +67,6 @@ config PCMCIA_3C589 config VORTEX tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" depends on (PCI || EISA) && HAS_IOPORT - select NET_CORE select MII ---help--- This option enables driver support for a large number of 10Mbps and diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 47618e505355..b2e840513735 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -849,7 +849,6 @@ static int ne_drv_remove(struct platform_device *pdev) free_irq(dev->irq, dev); release_region(dev->base_addr, NE_IO_EXTENT); free_netdev(dev); - platform_set_drvdata(pdev, NULL); } return 0; } diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 587a885de259..92201080e07a 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -676,7 +676,7 @@ static int ne2k_pci_resume (struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata (pdev); int rc; - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); rc = pci_enable_device(pdev); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index ed956e08d38b..2037080c504d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -20,9 +20,11 @@ config SUNGEM_PHY source "drivers/net/ethernet/3com/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" +source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apple/Kconfig" +source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/adi/Kconfig" @@ -63,7 +65,6 @@ config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the PCI-Express gigabit ethernet adapters @@ -95,7 +96,6 @@ config FEALNX tristate "Myson MTD-8xx PCI Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- Say Y here to support the Myson MTD-800 family of PCI-based Ethernet @@ -106,7 +106,6 @@ source "drivers/net/ethernet/8390/Kconfig" config NET_NETX tristate "NetX Ethernet support" - select NET_CORE select MII depends on ARCH_NETX ---help--- @@ -124,7 +123,6 @@ source "drivers/net/ethernet/oki-semi/Kconfig" config ETHOC tristate "OpenCores 10/100 Mbps Ethernet MAC support" depends on HAS_IOMEM && HAS_DMA - select NET_CORE select MII select PHYLIB select CRC32 diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 8268d85f9448..390bd0bfaa27 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -6,9 +6,11 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ +obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_NET_VENDOR_AMD) += 
amd/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ +obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_CADENCE) += cadence/ obj-$(CONFIG_NET_BFIN) += adi/ diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig index 0bff571b1bb3..5c804bbe3dab 100644 --- a/drivers/net/ethernet/adaptec/Kconfig +++ b/drivers/net/ethernet/adaptec/Kconfig @@ -22,7 +22,6 @@ config ADAPTEC_STARFIRE tristate "Adaptec Starfire/DuraLAN support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig index a9481606bbcd..f952fff6a9a9 100644 --- a/drivers/net/ethernet/adi/Kconfig +++ b/drivers/net/ethernet/adi/Kconfig @@ -23,7 +23,6 @@ config BFIN_MAC tristate "Blackfin on-chip MAC support" depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) select CRC32 - select NET_CORE select MII select PHYLIB select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index dada66bfe0d6..e904b3838dcc 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1719,7 +1719,6 @@ out_err_mii_probe: mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); out_err_probe_mac: - platform_set_drvdata(pdev, NULL); free_netdev(ndev); return rc; @@ -1732,8 +1731,6 @@ static int bfin_mac_remove(struct platform_device *pdev) bfin_phc_release(lp); - platform_set_drvdata(pdev, NULL); - lp->mii_bus->priv = NULL; unregister_netdev(ndev); @@ -1868,7 +1865,6 @@ static int bfin_mii_bus_remove(struct platform_device *pdev) struct bfin_mii_bus_platform_data *mii_bus_pd = dev_get_platdata(&pdev->dev); - platform_set_drvdata(pdev, NULL); mdiobus_unregister(miibus); kfree(miibus->irq); mdiobus_free(miibus); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 269295403fc4..7ff4b30d55ea 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1565,7 +1565,7 @@ error1: static int greth_of_remove(struct platform_device *of_dev) { - struct net_device *ndev = dev_get_drvdata(&of_dev->dev); + struct net_device *ndev = platform_get_drvdata(of_dev); struct greth_private *greth = netdev_priv(ndev); /* Free descriptor areas */ @@ -1573,8 +1573,6 @@ static int greth_of_remove(struct platform_device *of_dev) dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys); - dev_set_drvdata(&of_dev->dev, NULL); - if (greth->phy) phy_stop(greth->phy); mdiobus_unregister(greth->mdio); diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig new file mode 100644 index 000000000000..53ad213e865b --- /dev/null +++ b/drivers/net/ethernet/allwinner/Kconfig @@ -0,0 +1,35 @@ +# +# Allwinner device configuration +# + +config NET_VENDOR_ALLWINNER + bool "Allwinner devices" + default y + depends on ARCH_SUNXI + ---help--- + If you have a network (Ethernet) card belonging to this + class, say Y and read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + Note that the answer to this question doesn't directly + affect the kernel: saying N will just cause the configurator + to skip all the questions about Allwinner cards. If you say Y, + you will be asked for your specific card in the following + questions. 
+ +if NET_VENDOR_ALLWINNER + +config SUN4I_EMAC + tristate "Allwinner A10 EMAC support" + depends on ARCH_SUNXI + depends on OF + select CRC32 + select MII + select PHYLIB + ---help--- + Support for Allwinner A10 EMAC ethernet driver. + + To compile this driver as a module, choose M here. The module + will be called sun4i-emac. + +endif # NET_VENDOR_ALLWINNER diff --git a/drivers/net/ethernet/allwinner/Makefile b/drivers/net/ethernet/allwinner/Makefile new file mode 100644 index 000000000000..03129f796514 --- /dev/null +++ b/drivers/net/ethernet/allwinner/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Allwinner device drivers. +# + +obj-$(CONFIG_SUN4I_EMAC) += sun4i-emac.o diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c new file mode 100644 index 000000000000..50b853a79d77 --- /dev/null +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -0,0 +1,954 @@ +/* + * Allwinner EMAC Fast Ethernet driver for Linux. + * + * Copyright 2012-2013 Stefan Roese <sr@denx.de> + * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * Based on the Linux driver provided by Allwinner: + * Copyright (C) 1997 Sten Wang + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/gpio.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/phy.h> + +#include "sun4i-emac.h" + +#define DRV_NAME "sun4i-emac" +#define DRV_VERSION "1.02" + +#define EMAC_MAX_FRAME_LEN 0x0600 + +/* Transmit timeout, default 5 seconds. */ +static int watchdog = 5000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +/* EMAC register address locking. + * + * The EMAC uses an address register to control where data written + * to the data register goes. This means that the address register + * must be preserved over interrupts or similar calls. + * + * During interrupt and other critical calls, a spinlock is used to + * protect the system, but the calls themselves save the address + * in the address register in case they are interrupting another + * access to the device. + * + * For general accesses a lock is provided so that calls which are + * allowed to sleep are serialised so that the address register does + * not need to be saved. This lock also serves to serialise access + * to the EEPROM and PHY access registers which are shared between + * these two devices. + */ + +/* The driver supports the original EMACE, and now the two newer + * devices, EMACA and EMACB. 
+ */ + +struct emac_board_info { + struct clk *clk; + struct device *dev; + struct platform_device *pdev; + spinlock_t lock; + void __iomem *membase; + u32 msg_enable; + struct net_device *ndev; + struct sk_buff *skb_last; + u16 tx_fifo_stat; + + int emacrx_completed_flag; + + struct phy_device *phy_dev; + struct device_node *phy_node; + unsigned int link; + unsigned int speed; + unsigned int duplex; + + phy_interface_t phy_interface; +}; + +static void emac_update_speed(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned int reg_val; + + /* set EMAC SPEED, depend on PHY */ + reg_val = readl(db->membase + EMAC_MAC_SUPP_REG); + reg_val &= ~(0x1 << 8); + if (db->speed == SPEED_100) + reg_val |= 1 << 8; + writel(reg_val, db->membase + EMAC_MAC_SUPP_REG); +} + +static void emac_update_duplex(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned int reg_val; + + /* set duplex depend on phy */ + reg_val = readl(db->membase + EMAC_MAC_CTL1_REG); + reg_val &= ~EMAC_MAC_CTL1_DUPLEX_EN; + if (db->duplex) + reg_val |= EMAC_MAC_CTL1_DUPLEX_EN; + writel(reg_val, db->membase + EMAC_MAC_CTL1_REG); +} + +static void emac_handle_link_change(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + struct phy_device *phydev = db->phy_dev; + unsigned long flags; + int status_change = 0; + + if (phydev->link) { + if (db->speed != phydev->speed) { + spin_lock_irqsave(&db->lock, flags); + db->speed = phydev->speed; + emac_update_speed(dev); + spin_unlock_irqrestore(&db->lock, flags); + status_change = 1; + } + + if (db->duplex != phydev->duplex) { + spin_lock_irqsave(&db->lock, flags); + db->duplex = phydev->duplex; + emac_update_duplex(dev); + spin_unlock_irqrestore(&db->lock, flags); + status_change = 1; + } + } + + if (phydev->link != db->link) { + if (!phydev->link) { + db->speed = 0; + db->duplex = -1; + } + db->link = phydev->link; + + status_change = 1; + } + + if (status_change) + phy_print_status(phydev); +} + +static int emac_mdio_probe(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + + /* to-do: PHY interrupts are currently not supported */ + + /* attach the mac to the phy */ + db->phy_dev = of_phy_connect(db->ndev, db->phy_node, + &emac_handle_link_change, 0, + db->phy_interface); + if (!db->phy_dev) { + netdev_err(db->ndev, "could not find the PHY\n"); + return -ENODEV; + } + + /* mask with MAC supported features */ + db->phy_dev->supported &= PHY_BASIC_FEATURES; + db->phy_dev->advertising = db->phy_dev->supported; + + db->link = 0; + db->speed = 0; + db->duplex = -1; + + return 0; +} + +static void emac_mdio_remove(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + + phy_disconnect(db->phy_dev); + db->phy_dev = NULL; +} + +static void emac_reset(struct emac_board_info *db) +{ + dev_dbg(db->dev, "resetting device\n"); + + /* RESET device */ + writel(0, db->membase + EMAC_CTL_REG); + udelay(200); + writel(EMAC_CTL_RESET, db->membase + EMAC_CTL_REG); + udelay(200); +} + +static void emac_outblk_32bit(void __iomem *reg, void *data, int count) +{ + writesl(reg, data, round_up(count, 4) / 4); +} + +static void emac_inblk_32bit(void __iomem *reg, void *data, int count) +{ + readsl(reg, data, round_up(count, 4) / 4); +} + +static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phydev) + return 
-ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +/* ethtool ops */ +static void emac_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME)); + strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); + strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info)); +} + +static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_drvinfo = emac_get_drvinfo, + .get_settings = emac_get_settings, + .set_settings = emac_set_settings, + .get_link = ethtool_op_get_link, +}; + +static unsigned int emac_setup(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + unsigned int reg_val; + + /* set up TX */ + reg_val = readl(db->membase + EMAC_TX_MODE_REG); + + writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN, + db->membase + EMAC_TX_MODE_REG); + + /* set up RX */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + + writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN | + EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN | + EMAC_RX_CTL_ACCEPT_MULTICAST_EN | + EMAC_RX_CTL_ACCEPT_BROADCAST_EN, + db->membase + EMAC_RX_CTL_REG); + + /* set MAC */ + /* set MAC CTL0 */ + reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); + writel(reg_val | EMAC_MAC_CTL0_RX_FLOW_CTL_EN | + EMAC_MAC_CTL0_TX_FLOW_CTL_EN, + db->membase + EMAC_MAC_CTL0_REG); + + /* set MAC CTL1 */ + reg_val = readl(db->membase + EMAC_MAC_CTL1_REG); + reg_val |= EMAC_MAC_CTL1_LEN_CHECK_EN; + reg_val |= EMAC_MAC_CTL1_CRC_EN; + reg_val |= EMAC_MAC_CTL1_PAD_EN; + writel(reg_val, db->membase + EMAC_MAC_CTL1_REG); + + /* set up IPGT */ + writel(EMAC_MAC_IPGT_FULL_DUPLEX, db->membase + EMAC_MAC_IPGT_REG); + + /* set up IPGR */ + writel((EMAC_MAC_IPGR_IPG1 << 8) | EMAC_MAC_IPGR_IPG2, + db->membase + EMAC_MAC_IPGR_REG); + + /* set up Collison window */ + writel((EMAC_MAC_CLRT_COLLISION_WINDOW << 8) | EMAC_MAC_CLRT_RM, + db->membase + EMAC_MAC_CLRT_REG); + + /* set up Max Frame Length */ + writel(EMAC_MAX_FRAME_LEN, + db->membase + EMAC_MAC_MAXF_REG); + + return 0; +} + +static unsigned int emac_powerup(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + unsigned int reg_val; + + /* initial EMAC */ + /* flush RX FIFO */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val |= 0x8; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + udelay(1); + + /* initial MAC */ + /* soft reset MAC */ + reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); + reg_val &= ~EMAC_MAC_CTL0_SOFT_RESET; + writel(reg_val, db->membase + EMAC_MAC_CTL0_REG); + + /* set MII clock */ + reg_val = readl(db->membase + EMAC_MAC_MCFG_REG); + reg_val &= (~(0xf << 2)); + reg_val |= (0xD << 2); + writel(reg_val, db->membase + EMAC_MAC_MCFG_REG); + + /* clear RX counter */ + writel(0x0, db->membase + EMAC_RX_FBC_REG); + + /* disable all interrupt and clear interrupt status */ + writel(0, db->membase + EMAC_INT_CTL_REG); + reg_val = readl(db->membase + EMAC_INT_STA_REG); + writel(reg_val, db->membase + EMAC_INT_STA_REG); + + udelay(1); + + /* set up EMAC 
*/ + emac_setup(ndev); + + /* set mac_address to chip */ + writel(ndev->dev_addr[0] << 16 | ndev->dev_addr[1] << 8 | ndev-> + dev_addr[2], db->membase + EMAC_MAC_A1_REG); + writel(ndev->dev_addr[3] << 16 | ndev->dev_addr[4] << 8 | ndev-> + dev_addr[5], db->membase + EMAC_MAC_A0_REG); + + mdelay(1); + + return 0; +} + +static int emac_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct emac_board_info *db = netdev_priv(dev); + + if (netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev-> + dev_addr[2], db->membase + EMAC_MAC_A1_REG); + writel(dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev-> + dev_addr[5], db->membase + EMAC_MAC_A0_REG); + + return 0; +} + +/* Initialize emac board */ +static void emac_init_device(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long flags; + unsigned int reg_val; + + spin_lock_irqsave(&db->lock, flags); + + emac_update_speed(dev); + emac_update_duplex(dev); + + /* enable RX/TX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val | EMAC_CTL_RESET | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + + /* enable RX/TX0/RX Hlevel interrup */ + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + spin_unlock_irqrestore(&db->lock, flags); +} + +/* Our watchdog timed out. Called by the networking layer */ +static void emac_timeout(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long flags; + + if (netif_msg_timer(db)) + dev_err(db->dev, "tx time out.\n"); + + /* Save previous register address */ + spin_lock_irqsave(&db->lock, flags); + + netif_stop_queue(dev); + emac_reset(db); + emac_init_device(dev); + /* We can accept TX packets again */ + dev->trans_start = jiffies; + netif_wake_queue(dev); + + /* Restore previous register address */ + spin_unlock_irqrestore(&db->lock, flags); +} + +/* Hardware start transmission. + * Send a packet to media from the upper layer. + */ +static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long channel; + unsigned long flags; + + channel = db->tx_fifo_stat & 3; + if (channel == 3) + return 1; + + channel = (channel == 1 ? 
1 : 0); + + spin_lock_irqsave(&db->lock, flags); + + writel(channel, db->membase + EMAC_TX_INS_REG); + + emac_outblk_32bit(db->membase + EMAC_TX_IO_DATA_REG, + skb->data, skb->len); + dev->stats.tx_bytes += skb->len; + + db->tx_fifo_stat |= 1 << channel; + /* TX control: First packet immediately send, second packet queue */ + if (channel == 0) { + /* set TX len */ + writel(skb->len, db->membase + EMAC_TX_PL0_REG); + /* start translate from fifo to phy */ + writel(readl(db->membase + EMAC_TX_CTL0_REG) | 1, + db->membase + EMAC_TX_CTL0_REG); + + /* save the time stamp */ + dev->trans_start = jiffies; + } else if (channel == 1) { + /* set TX len */ + writel(skb->len, db->membase + EMAC_TX_PL1_REG); + /* start translate from fifo to phy */ + writel(readl(db->membase + EMAC_TX_CTL1_REG) | 1, + db->membase + EMAC_TX_CTL1_REG); + + /* save the time stamp */ + dev->trans_start = jiffies; + } + + if ((db->tx_fifo_stat & 3) == 3) { + /* Second packet */ + netif_stop_queue(dev); + } + + spin_unlock_irqrestore(&db->lock, flags); + + /* free this SKB */ + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* EMAC interrupt handler + * receive the packet to upper layer, free the transmitted packet + */ +static void emac_tx_done(struct net_device *dev, struct emac_board_info *db, + unsigned int tx_status) +{ + /* One packet sent complete */ + db->tx_fifo_stat &= ~(tx_status & 3); + if (3 == (tx_status & 3)) + dev->stats.tx_packets += 2; + else + dev->stats.tx_packets++; + + if (netif_msg_tx_done(db)) + dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status); + + netif_wake_queue(dev); +} + +/* Received a packet and pass to upper layer + */ +static void emac_rx(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + struct sk_buff *skb; + u8 *rdptr; + bool good_packet; + static int rxlen_last; + unsigned int reg_val; + u32 rxhdr, rxstatus, rxcount, rxlen; + + /* Check packet ready or not */ + while (1) { + /* race warning: the first packet might arrive with + * the interrupts disabled, but the second will fix + * it + */ + rxcount = readl(db->membase + EMAC_RX_FBC_REG); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RXCount: %x\n", rxcount); + + if ((db->skb_last != NULL) && (rxlen_last > 0)) { + dev->stats.rx_bytes += rxlen_last; + + /* Pass to upper layer */ + db->skb_last->protocol = eth_type_trans(db->skb_last, + dev); + netif_rx(db->skb_last); + dev->stats.rx_packets++; + db->skb_last = NULL; + rxlen_last = 0; + + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val &= ~EMAC_RX_CTL_DMA_EN; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + } + + if (!rxcount) { + db->emacrx_completed_flag = 1; + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + /* had one stuck? 
*/ + rxcount = readl(db->membase + EMAC_RX_FBC_REG); + if (!rxcount) + return; + } + + reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG); + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "receive header: %x\n", reg_val); + if (reg_val != EMAC_UNDOCUMENTED_MAGIC) { + /* disable RX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val & ~EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + + /* Flush RX FIFO */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + writel(reg_val | (1 << 3), + db->membase + EMAC_RX_CTL_REG); + + do { + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + } while (reg_val & (1 << 3)); + + /* enable RX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val | EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + db->emacrx_completed_flag = 1; + + return; + } + + /* A packet ready now & Get status/length */ + good_packet = true; + + emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG, + &rxhdr, sizeof(rxhdr)); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr))); + + rxlen = EMAC_RX_IO_DATA_LEN(rxhdr); + rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RX: status %02x, length %04x\n", + rxstatus, rxlen); + + /* Packet Status check */ + if (rxlen < 0x40) { + good_packet = false; + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "RX: Bad Packet (runt)\n"); + } + + if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) { + good_packet = false; + + if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "crc error\n"); + dev->stats.rx_crc_errors++; + } + + if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "length error\n"); + dev->stats.rx_length_errors++; + } + } + + /* Move data from EMAC */ + skb = dev_alloc_skb(rxlen + 4); + if (good_packet && skb) { + skb_reserve(skb, 2); + rdptr = (u8 *) skb_put(skb, rxlen - 4); + + /* Read received packet from RX SRAM */ + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RxLen %x\n", rxlen); + + emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG, + rdptr, rxlen); + dev->stats.rx_bytes += rxlen; + + /* Pass to upper layer */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + } +} + +static irqreturn_t emac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct emac_board_info *db = netdev_priv(dev); + int int_status; + unsigned long flags; + unsigned int reg_val; + + /* A real interrupt coming */ + + /* holders of db->lock must always block IRQs */ + spin_lock_irqsave(&db->lock, flags); + + /* Disable all interrupts */ + writel(0, db->membase + EMAC_INT_CTL_REG); + + /* Got EMAC interrupt status */ + /* Got ISR */ + int_status = readl(db->membase + EMAC_INT_STA_REG); + /* Clear ISR status */ + writel(int_status, db->membase + EMAC_INT_STA_REG); + + if (netif_msg_intr(db)) + dev_dbg(db->dev, "emac interrupt %02x\n", int_status); + + /* Received the coming packet */ + if ((int_status & 0x100) && (db->emacrx_completed_flag == 1)) { + /* carrier lost */ + db->emacrx_completed_flag = 0; + emac_rx(dev); + } + + /* Transmit Interrupt check */ + if (int_status & (0x01 | 0x02)) + emac_tx_done(dev, db, int_status); + + if (int_status & (0x04 | 0x08)) + netdev_info(dev, " ab : %x\n", int_status); + + /* Re-enable interrupt mask */ + if 
(db->emacrx_completed_flag == 1) { + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + } + spin_unlock_irqrestore(&db->lock, flags); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Used by netconsole + */ +static void emac_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + emac_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* Open the interface. + * The interface is opened whenever "ifconfig" actives it. + */ +static int emac_open(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + int ret; + + if (netif_msg_ifup(db)) + dev_dbg(db->dev, "enabling %s\n", dev->name); + + if (devm_request_irq(db->dev, dev->irq, &emac_interrupt, + 0, dev->name, dev)) + return -EAGAIN; + + /* Initialize EMAC board */ + emac_reset(db); + emac_init_device(dev); + + ret = emac_mdio_probe(dev); + if (ret < 0) { + netdev_err(dev, "cannot probe MDIO bus\n"); + return ret; + } + + phy_start(db->phy_dev); + netif_start_queue(dev); + + return 0; +} + +static void emac_shutdown(struct net_device *dev) +{ + unsigned int reg_val; + struct emac_board_info *db = netdev_priv(dev); + + /* Disable all interrupt */ + writel(0, db->membase + EMAC_INT_CTL_REG); + + /* clear interupt status */ + reg_val = readl(db->membase + EMAC_INT_STA_REG); + writel(reg_val, db->membase + EMAC_INT_STA_REG); + + /* Disable RX/TX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + reg_val &= ~(EMAC_CTL_TX_EN | EMAC_CTL_RX_EN | EMAC_CTL_RESET); + writel(reg_val, db->membase + EMAC_CTL_REG); +} + +/* Stop the interface. + * The interface is stopped when it is brought. + */ +static int emac_stop(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + + if (netif_msg_ifdown(db)) + dev_dbg(db->dev, "shutting down %s\n", ndev->name); + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + phy_stop(db->phy_dev); + + emac_mdio_remove(ndev); + + emac_shutdown(ndev); + + return 0; +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_stop, + .ndo_start_xmit = emac_start_xmit, + .ndo_tx_timeout = emac_timeout, + .ndo_do_ioctl = emac_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = emac_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = emac_poll_controller, +#endif +}; + +/* Search EMAC board, allocate space and register it + */ +static int emac_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct emac_board_info *db; + struct net_device *ndev; + int ret = 0; + const char *mac_addr; + + ndev = alloc_etherdev(sizeof(struct emac_board_info)); + if (!ndev) { + dev_err(&pdev->dev, "could not allocate device.\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + db = netdev_priv(ndev); + memset(db, 0, sizeof(*db)); + + db->dev = &pdev->dev; + db->ndev = ndev; + db->pdev = pdev; + + spin_lock_init(&db->lock); + + db->membase = of_iomap(np, 0); + if (!db->membase) { + dev_err(&pdev->dev, "failed to remap registers\n"); + ret = -ENOMEM; + goto out; + } + + /* fill in parameters for net-dev structure */ + ndev->base_addr = (unsigned long)db->membase; + ndev->irq = irq_of_parse_and_map(np, 0); + if (ndev->irq == -ENXIO) { + netdev_err(ndev, "No irq resource\n"); + ret = ndev->irq; + goto out; + } + + db->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(db->clk)) + goto out; 
+ + clk_prepare_enable(db->clk); + + db->phy_node = of_parse_phandle(np, "phy", 0); + if (!db->phy_node) { + dev_err(&pdev->dev, "no associated PHY\n"); + ret = -ENODEV; + goto out; + } + + /* Read MAC-address from DT */ + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(&pdev->dev, "using random MAC address %pM\n", + ndev->dev_addr); + } + + db->emacrx_completed_flag = 1; + emac_powerup(ndev); + emac_reset(db); + + ether_setup(ndev); + + ndev->netdev_ops = &emac_netdev_ops; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); + ndev->ethtool_ops = &emac_ethtool_ops; + + platform_set_drvdata(pdev, ndev); + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(ndev); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Registering netdev failed!\n"); + ret = -ENODEV; + goto out; + } + + dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n", + ndev->name, db->membase, ndev->irq, ndev->dev_addr); + + return 0; + +out: + dev_err(db->dev, "not found (%d).\n", ret); + + free_netdev(ndev); + + return ret; +} + +static int emac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + unregister_netdev(ndev); + free_netdev(ndev); + + dev_dbg(&pdev->dev, "released and freed device\n"); + return 0; +} + +static int emac_suspend(struct platform_device *dev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(dev); + + netif_carrier_off(ndev); + netif_device_detach(ndev); + emac_shutdown(ndev); + + return 0; +} + +static int emac_resume(struct platform_device *dev) +{ + struct net_device *ndev = platform_get_drvdata(dev); + struct emac_board_info *db = netdev_priv(ndev); + + emac_reset(db); + emac_init_device(ndev); + netif_device_attach(ndev); + + return 0; +} + +static const struct of_device_id emac_of_match[] = { + {.compatible = "allwinner,sun4i-emac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, emac_of_match); + +static struct platform_driver emac_driver = { + .driver = { + .name = "sun4i-emac", + .of_match_table = emac_of_match, + }, + .probe = emac_probe, + .remove = emac_remove, + .suspend = emac_suspend, + .resume = emac_resume, +}; + +module_platform_driver(emac_driver); + +MODULE_AUTHOR("Stefan Roese <sr@denx.de>"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); +MODULE_DESCRIPTION("Allwinner A10 emac network driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h new file mode 100644 index 000000000000..38c72d9ec600 --- /dev/null +++ b/drivers/net/ethernet/allwinner/sun4i-emac.h @@ -0,0 +1,108 @@ +/* + * Allwinner EMAC Fast Ethernet driver for Linux. + * + * Copyright 2012 Stefan Roese <sr@denx.de> + * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * Based on the Linux driver provided by Allwinner: + * Copyright (C) 1997 Sten Wang + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef _SUN4I_EMAC_H_ +#define _SUN4I_EMAC_H_ + +#define EMAC_CTL_REG (0x00) +#define EMAC_CTL_RESET (1 << 0) +#define EMAC_CTL_TX_EN (1 << 1) +#define EMAC_CTL_RX_EN (1 << 2) +#define EMAC_TX_MODE_REG (0x04) +#define EMAC_TX_MODE_ABORTED_FRAME_EN (1 << 0) +#define EMAC_TX_MODE_DMA_EN (1 << 1) +#define EMAC_TX_FLOW_REG (0x08) +#define EMAC_TX_CTL0_REG (0x0c) +#define EMAC_TX_CTL1_REG (0x10) +#define EMAC_TX_INS_REG (0x14) +#define EMAC_TX_PL0_REG (0x18) +#define EMAC_TX_PL1_REG (0x1c) +#define EMAC_TX_STA_REG (0x20) +#define EMAC_TX_IO_DATA_REG (0x24) +#define EMAC_TX_IO_DATA1_REG (0x28) +#define EMAC_TX_TSVL0_REG (0x2c) +#define EMAC_TX_TSVH0_REG (0x30) +#define EMAC_TX_TSVL1_REG (0x34) +#define EMAC_TX_TSVH1_REG (0x38) +#define EMAC_RX_CTL_REG (0x3c) +#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1) +#define EMAC_RX_CTL_DMA_EN (1 << 2) +#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4) +#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5) +#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6) +#define EMAC_RX_CTL_PASS_LEN_ERR_EN (1 << 7) +#define EMAC_RX_CTL_PASS_LEN_OOR_EN (1 << 8) +#define EMAC_RX_CTL_ACCEPT_UNICAST_EN (1 << 16) +#define EMAC_RX_CTL_DA_FILTER_EN (1 << 17) +#define EMAC_RX_CTL_ACCEPT_MULTICAST_EN (1 << 20) +#define EMAC_RX_CTL_HASH_FILTER_EN (1 << 21) +#define EMAC_RX_CTL_ACCEPT_BROADCAST_EN (1 << 22) +#define EMAC_RX_CTL_SA_FILTER_EN (1 << 24) +#define EMAC_RX_CTL_SA_FILTER_INVERT_EN (1 << 25) +#define EMAC_RX_HASH0_REG (0x40) +#define EMAC_RX_HASH1_REG (0x44) +#define EMAC_RX_STA_REG (0x48) +#define EMAC_RX_IO_DATA_REG (0x4c) +#define EMAC_RX_IO_DATA_LEN(x) (x & 0xffff) +#define EMAC_RX_IO_DATA_STATUS(x) ((x >> 16) & 0xffff) +#define EMAC_RX_IO_DATA_STATUS_CRC_ERR (1 << 4) +#define EMAC_RX_IO_DATA_STATUS_LEN_ERR (3 << 5) +#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7) +#define EMAC_RX_FBC_REG (0x50) +#define EMAC_INT_CTL_REG (0x54) +#define EMAC_INT_STA_REG (0x58) +#define EMAC_MAC_CTL0_REG (0x5c) +#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2) +#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3) +#define EMAC_MAC_CTL0_SOFT_RESET (1 << 15) +#define EMAC_MAC_CTL1_REG (0x60) +#define EMAC_MAC_CTL1_DUPLEX_EN (1 << 0) +#define EMAC_MAC_CTL1_LEN_CHECK_EN (1 << 1) +#define EMAC_MAC_CTL1_HUGE_FRAME_EN (1 << 2) +#define EMAC_MAC_CTL1_DELAYED_CRC_EN (1 << 3) +#define EMAC_MAC_CTL1_CRC_EN (1 << 4) +#define EMAC_MAC_CTL1_PAD_EN (1 << 5) +#define EMAC_MAC_CTL1_PAD_CRC_EN (1 << 6) +#define EMAC_MAC_CTL1_AD_SHORT_FRAME_EN (1 << 7) +#define EMAC_MAC_CTL1_BACKOFF_DIS (1 << 12) +#define EMAC_MAC_IPGT_REG (0x64) +#define EMAC_MAC_IPGT_HALF_DUPLEX (0x12) +#define EMAC_MAC_IPGT_FULL_DUPLEX (0x15) +#define EMAC_MAC_IPGR_REG (0x68) +#define EMAC_MAC_IPGR_IPG1 (0x0c) +#define EMAC_MAC_IPGR_IPG2 (0x12) +#define EMAC_MAC_CLRT_REG (0x6c) +#define EMAC_MAC_CLRT_COLLISION_WINDOW (0x37) +#define EMAC_MAC_CLRT_RM (0x0f) +#define EMAC_MAC_MAXF_REG (0x70) +#define EMAC_MAC_SUPP_REG (0x74) +#define EMAC_MAC_TEST_REG (0x78) +#define EMAC_MAC_MCFG_REG (0x7c) +#define EMAC_MAC_A0_REG (0x98) +#define EMAC_MAC_A1_REG (0x9c) +#define EMAC_MAC_A2_REG (0xa0) +#define EMAC_SAFX_L_REG0 (0xa4) +#define EMAC_SAFX_H_REG0 (0xa8) +#define EMAC_SAFX_L_REG1 (0xac) +#define EMAC_SAFX_H_REG1 (0xb0) +#define EMAC_SAFX_L_REG2 (0xb4) +#define EMAC_SAFX_H_REG2 (0xb8) +#define EMAC_SAFX_L_REG3 (0xbc) +#define EMAC_SAFX_H_REG3 (0xc0) + +#define EMAC_PHY_DUPLEX (1 << 8) + +#define EMAC_EEPROM_MAGIC (0x444d394b) +#define EMAC_UNDOCUMENTED_MAGIC (0x0143414d) +#endif /* _SUN4I_EMAC_H_ */ diff --git a/drivers/net/ethernet/alteon/acenic.c 
b/drivers/net/ethernet/alteon/acenic.c index b7894f8af9d1..219be1bf3cfc 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -702,19 +702,6 @@ static struct pci_driver acenic_pci_driver = { .remove = acenic_remove_one, }; -static int __init acenic_init(void) -{ - return pci_register_driver(&acenic_pci_driver); -} - -static void __exit acenic_exit(void) -{ - pci_unregister_driver(&acenic_pci_driver); -} - -module_init(acenic_init); -module_exit(acenic_exit); - static void ace_free_descriptors(struct net_device *dev) { struct ace_private *ap = netdev_priv(dev); @@ -3199,3 +3186,5 @@ static int read_eeprom_byte(struct net_device *dev, unsigned long offset) ap->name, offset); goto out; } + +module_pci_driver(acenic_pci_driver); diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 13d74aa4033d..562df46e0a82 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -34,7 +34,6 @@ config AMD8111_ETH tristate "AMD 8111 (new PCI LANCE) support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- If you have an AMD 8111-based PCI LANCE ethernet card, @@ -60,7 +59,6 @@ config PCNET32 tristate "AMD PCnet32 PCI support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- If you have a PCnet32 or PCnetPCI based network (Ethernet) card, diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 8e6b665a6726..1b1429d5d5c2 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1813,7 +1813,7 @@ static const struct net_device_ops amd8111e_netdev_ops = { static int amd8111e_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int err,i,pm_cap; + int err, i; unsigned long reg_addr,reg_len; struct amd8111e_priv* lp; struct net_device* dev; @@ -1842,7 +1842,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, pci_set_master(pdev); /* Find power-management capability. 
*/ - if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ + if (!pdev->pm_cap) { printk(KERN_ERR "amd8111e: No Power Management capability, " "exiting.\n"); err = -ENODEV; @@ -1875,7 +1875,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, lp = netdev_priv(dev); lp->pci_dev = pdev; lp->amd8111e_net_dev = dev; - lp->pm_cap = pm_cap; + lp->pm_cap = pdev->pm_cap; spin_lock_init(&lp->lock); @@ -1981,15 +1981,4 @@ static struct pci_driver amd8111e_driver = { .resume = amd8111e_resume }; -static int __init amd8111e_init(void) -{ - return pci_register_driver(&amd8111e_driver); -} - -static void __exit amd8111e_cleanup(void) -{ - pci_unregister_driver(&amd8111e_driver); -} - -module_init(amd8111e_init); -module_exit(amd8111e_cleanup); +module_pci_driver(amd8111e_driver); diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 688aede742c7..ceb45bc963a9 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1301,8 +1301,6 @@ static int au1000_remove(struct platform_device *pdev) int i; struct resource *base, *macen; - platform_set_drvdata(pdev, NULL); - unregister_netdev(dev); mdiobus_unregister(aup->mii_bus); mdiobus_free(aup->mii_bus); diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index f47b780892e9..ece56831a647 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1470,7 +1470,7 @@ no_link_test: goto fail; } - dev_set_drvdata(&op->dev, lp); + platform_set_drvdata(op, lp); printk(KERN_INFO "%s: LANCE %pM\n", dev->name, dev->dev_addr); @@ -1501,7 +1501,7 @@ static int sunlance_sbus_probe(struct platform_device *op) static int sunlance_sbus_remove(struct platform_device *op) { - struct lance_private *lp = dev_get_drvdata(&op->dev); + struct lance_private *lp = platform_get_drvdata(op); struct net_device *net_dev = lp->dev; unregister_netdev(net_dev); @@ -1510,8 +1510,6 @@ static int sunlance_sbus_remove(struct platform_device *op) free_netdev(net_dev); - dev_set_drvdata(&op->dev, NULL); - return 0; } diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index f36bbd6d5085..a597b766f080 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1016,7 +1016,6 @@ static void bmac_set_multicast(struct net_device *dev) static void bmac_set_multicast(struct net_device *dev) { struct netdev_hw_addr *ha; - int i; unsigned short rx_cfg; u32 crc; @@ -1030,14 +1029,12 @@ static void bmac_set_multicast(struct net_device *dev) rx_cfg |= RxPromiscEnable; bmwrite(dev, RXCFG, rx_cfg); } else { - u16 hash_table[4]; + u16 hash_table[4] = { 0 }; rx_cfg = bmread(dev, RXCFG); rx_cfg &= ~RxPromiscEnable; bmwrite(dev, RXCFG, rx_cfg); - for(i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig new file mode 100644 index 000000000000..514c57fd26f1 --- /dev/null +++ b/drivers/net/ethernet/arc/Kconfig @@ -0,0 +1,31 @@ +# +# ARC EMAC network device configuration +# + +config NET_VENDOR_ARC + bool "ARC devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about ARC cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ARC + +config ARC_EMAC + tristate "ARC EMAC support" + select MII + select PHYLIB + depends on OF_IRQ + depends on OF_NET + ---help--- + On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x + non-standard on-chip ethernet device ARC EMAC 10/100 is used. + Say Y here if you have such a board. If unsure, say N. + +endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile new file mode 100644 index 000000000000..00c8657637d5 --- /dev/null +++ b/drivers/net/ethernet/arc/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the ARC network device drivers. +# + +arc_emac-objs := emac_main.o emac_mdio.o +obj-$(CONFIG_ARC_EMAC) += arc_emac.o diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h new file mode 100644 index 000000000000..dc08678bf9a4 --- /dev/null +++ b/drivers/net/ethernet/arc/emac.h @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * Registers and bits definitions of ARC EMAC + */ + +#ifndef ARC_EMAC_H +#define ARC_EMAC_H + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/netdevice.h> +#include <linux/phy.h> + +/* STATUS and ENABLE Register bit masks */ +#define TXINT_MASK (1<<0) /* Transmit interrupt */ +#define RXINT_MASK (1<<1) /* Receive interrupt */ +#define ERR_MASK (1<<2) /* Error interrupt */ +#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */ +#define MSER_MASK (1<<4) /* Missed packet counter error */ +#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */ +#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */ +#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */ +#define MDIO_MASK (1<<12) /* MDIO complete interrupt */ +#define TXPL_MASK (1<<31) /* Force polling of BD by EMAC */ + +/* CONTROL Register bit masks */ +#define EN_MASK (1<<0) /* VMAC enable */ +#define TXRN_MASK (1<<3) /* TX enable */ +#define RXRN_MASK (1<<4) /* RX enable */ +#define DSBC_MASK (1<<8) /* Disable receive broadcast */ +#define ENFL_MASK (1<<10) /* Enable Full-duplex */ +#define PROM_MASK (1<<11) /* Promiscuous mode */ + +/* Buffer descriptor INFO bit masks */ +#define OWN_MASK (1<<31) /* 0-CPU owns buffer, 1-EMAC owns buffer */ +#define FIRST_MASK (1<<16) /* First buffer in chain */ +#define LAST_MASK (1<<17) /* Last buffer in chain */ +#define LEN_MASK 0x000007FF /* last 11 bits */ +#define CRLS (1<<21) +#define DEFR (1<<22) +#define DROP (1<<23) +#define RTRY (1<<24) +#define LTCL (1<<28) +#define UFLO (1<<29) + +#define FOR_EMAC OWN_MASK +#define FOR_CPU 0 + +/* ARC EMAC register set combines entries for MAC and MDIO */ +enum { + R_ID = 0, + R_STATUS, + R_ENABLE, + R_CTRL, + R_POLLRATE, + R_RXERR, + R_MISS, + R_TX_RING, + R_RX_RING, + R_ADDRL, + R_ADDRH, + R_LAFL, + R_LAFH, + R_MDIO, +}; + +#define TX_TIMEOUT (400*HZ/1000) /* Transmission timeout */ + +#define ARC_EMAC_NAPI_WEIGHT 40 /* Workload for NAPI */ + +#define EMAC_BUFFER_SIZE 1536 /* EMAC buffer size */ + +/** + * struct arc_emac_bd - EMAC buffer descriptor (BD). + * + * @info: Contains status information on the buffer itself. + * @data: 32-bit byte addressable pointer to the packet data. 
+ */ +struct arc_emac_bd { + __le32 info; + dma_addr_t data; +}; + +/* Number of Rx/Tx BD's */ +#define RX_BD_NUM 128 +#define TX_BD_NUM 128 + +#define RX_RING_SZ (RX_BD_NUM * sizeof(struct arc_emac_bd)) +#define TX_RING_SZ (TX_BD_NUM * sizeof(struct arc_emac_bd)) + +/** + * struct buffer_state - Stores Rx/Tx buffer state. + * @sk_buff: Pointer to socket buffer. + * @addr: Start address of DMA-mapped memory region. + * @len: Length of DMA-mapped memory region. + */ +struct buffer_state { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(addr); + DEFINE_DMA_UNMAP_LEN(len); +}; + +/** + * struct arc_emac_priv - Storage of EMAC's private information. + * @dev: Pointer to the current device. + * @ndev: Pointer to the current network device. + * @phy_dev: Pointer to attached PHY device. + * @bus: Pointer to the current MII bus. + * @regs: Base address of EMAC memory-mapped control registers. + * @napi: Structure for NAPI. + * @stats: Network device statistics. + * @rxbd: Pointer to Rx BD ring. + * @txbd: Pointer to Tx BD ring. + * @rxbd_dma: DMA handle for Rx BD ring. + * @txbd_dma: DMA handle for Tx BD ring. + * @rx_buff: Storage for Rx buffers states. + * @tx_buff: Storage for Tx buffers states. + * @txbd_curr: Index of Tx BD to use on the next "ndo_start_xmit". + * @txbd_dirty: Index of Tx BD to free on the next Tx interrupt. + * @last_rx_bd: Index of the last Rx BD we've got from EMAC. + * @link: PHY's last seen link state. + * @duplex: PHY's last set duplex mode. + * @speed: PHY's last set speed. + * @max_speed: Maximum supported by current system network data-rate. + */ +struct arc_emac_priv { + /* Devices */ + struct device *dev; + struct net_device *ndev; + struct phy_device *phy_dev; + struct mii_bus *bus; + + void __iomem *regs; + + struct napi_struct napi; + struct net_device_stats stats; + + struct arc_emac_bd *rxbd; + struct arc_emac_bd *txbd; + + dma_addr_t rxbd_dma; + dma_addr_t txbd_dma; + + struct buffer_state rx_buff[RX_BD_NUM]; + struct buffer_state tx_buff[TX_BD_NUM]; + unsigned int txbd_curr; + unsigned int txbd_dirty; + + unsigned int last_rx_bd; + + unsigned int link; + unsigned int duplex; + unsigned int speed; + unsigned int max_speed; +}; + +/** + * arc_reg_set - Sets EMAC register with provided value. + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @value: Value to set in register. + */ +static inline void arc_reg_set(struct arc_emac_priv *priv, int reg, int value) +{ + iowrite32(value, priv->regs + reg * sizeof(int)); +} + +/** + * arc_reg_get - Gets value of specified EMAC register. + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * + * returns: Value of requested register. + */ +static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg) +{ + return ioread32(priv->regs + reg * sizeof(int)); +} + +/** + * arc_reg_or - Applies mask to specified EMAC register - ("reg" | "mask"). + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @mask: Mask to apply to specified register. + * + * This function reads initial register value, then applies provided mask + * to it and then writes register back. + */ +static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask) +{ + unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value | mask); +} + +/** + * arc_reg_clr - Applies mask to specified EMAC register - ("reg" & ~"mask"). 
+ * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @mask: Mask to apply to specified register. + * + * This function reads initial register value, then applies provided mask + * to it and then writes register back. + */ +static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) +{ + unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value & ~mask); +} + +int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv); +int arc_mdio_remove(struct arc_emac_priv *priv); + +#endif /* ARC_EMAC_H */ diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c new file mode 100644 index 000000000000..f1b121ee5525 --- /dev/null +++ b/drivers/net/ethernet/arc/emac_main.c @@ -0,0 +1,819 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for the ARC EMAC 10100 (hardware revision 5) + * + * Contributors: + * Amit Bhor + * Sameer Dhavale + * Vineet Gupta + */ + +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> + +#include "emac.h" + +#define DRV_NAME "arc_emac" +#define DRV_VERSION "1.0" + +/** + * arc_emac_adjust_link - Adjust the PHY link duplex. + * @ndev: Pointer to the net_device structure. + * + * This function is called to change the duplex setting after auto negotiation + * is done by the PHY. + */ +static void arc_emac_adjust_link(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct phy_device *phy_dev = priv->phy_dev; + unsigned int reg, state_changed = 0; + + if (priv->link != phy_dev->link) { + priv->link = phy_dev->link; + state_changed = 1; + } + + if (priv->speed != phy_dev->speed) { + priv->speed = phy_dev->speed; + state_changed = 1; + } + + if (priv->duplex != phy_dev->duplex) { + reg = arc_reg_get(priv, R_CTRL); + + if (DUPLEX_FULL == phy_dev->duplex) + reg |= ENFL_MASK; + else + reg &= ~ENFL_MASK; + + arc_reg_set(priv, R_CTRL, reg); + priv->duplex = phy_dev->duplex; + state_changed = 1; + } + + if (state_changed) + phy_print_status(phy_dev); +} + +/** + * arc_emac_get_settings - Get PHY settings. + * @ndev: Pointer to net_device structure. + * @cmd: Pointer to ethtool_cmd structure. + * + * This implements ethtool command for getting PHY settings. If PHY could + * not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to get the PHY settings. + * Issue "ethtool ethX" under linux prompt to execute this function. + */ +static int arc_emac_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + return phy_ethtool_gset(priv->phy_dev, cmd); +} + +/** + * arc_emac_set_settings - Set PHY settings as passed in the argument. + * @ndev: Pointer to net_device structure. + * @cmd: Pointer to ethtool_cmd structure. + * + * This implements ethtool command for setting various PHY settings. If PHY + * could not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to set the PHY. + * Issue e.g. 
"ethtool -s ethX speed 1000" under linux prompt to execute this + * function. + */ +static int arc_emac_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + return phy_ethtool_sset(priv->phy_dev, cmd); +} + +/** + * arc_emac_get_drvinfo - Get EMAC driver information. + * @ndev: Pointer to net_device structure. + * @info: Pointer to ethtool_drvinfo structure. + * + * This implements ethtool command for getting the driver information. + * Issue "ethtool -i ethX" under linux prompt to execute this function. + */ +static void arc_emac_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static const struct ethtool_ops arc_emac_ethtool_ops = { + .get_settings = arc_emac_get_settings, + .set_settings = arc_emac_set_settings, + .get_drvinfo = arc_emac_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK) + +/** + * arc_emac_tx_clean - clears processed by EMAC Tx BDs. + * @ndev: Pointer to the network device. + */ +static void arc_emac_tx_clean(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats; + unsigned int i; + + for (i = 0; i < TX_BD_NUM; i++) { + unsigned int *txbd_dirty = &priv->txbd_dirty; + struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty]; + struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty]; + struct sk_buff *skb = tx_buff->skb; + unsigned int info = le32_to_cpu(txbd->info); + + *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; + + if ((info & FOR_EMAC) || !txbd->data) + break; + + if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { + stats->tx_errors++; + stats->tx_dropped++; + + if (info & DEFR) + stats->tx_carrier_errors++; + + if (info & LTCL) + stats->collisions++; + + if (info & UFLO) + stats->tx_fifo_errors++; + } else if (likely(info & FIRST_OR_LAST_MASK)) { + stats->tx_packets++; + stats->tx_bytes += skb->len; + } + + dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), + dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); + + /* return the sk_buff to system */ + dev_kfree_skb_irq(skb); + + txbd->data = 0; + txbd->info = 0; + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } +} + +/** + * arc_emac_rx - processing of Rx packets. + * @ndev: Pointer to the network device. + * @budget: How many BDs to process on 1 call. + * + * returns: Number of processed BDs + * + * Iterate through Rx BDs and deliver received packages to upper layer. + */ +static int arc_emac_rx(struct net_device *ndev, int budget) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int work_done; + + for (work_done = 0; work_done <= budget; work_done++) { + unsigned int *last_rx_bd = &priv->last_rx_bd; + struct net_device_stats *stats = &priv->stats; + struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; + struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; + unsigned int pktlen, info = le32_to_cpu(rxbd->info); + struct sk_buff *skb; + dma_addr_t addr; + + if (unlikely((info & OWN_MASK) == FOR_EMAC)) + break; + + /* Make a note that we saw a packet at this BD. 
+ * So next time, driver starts from this + 1 + */ + *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; + + if (unlikely((info & FIRST_OR_LAST_MASK) != + FIRST_OR_LAST_MASK)) { + /* We pre-allocate buffers of MTU size so incoming + * packets won't be split/chained. + */ + if (net_ratelimit()) + netdev_err(ndev, "incomplete packet received\n"); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); + stats->rx_errors++; + stats->rx_length_errors++; + continue; + } + + pktlen = info & LEN_MASK; + stats->rx_packets++; + stats->rx_bytes += pktlen; + skb = rx_buff->skb; + skb_put(skb, pktlen); + skb->dev = ndev; + skb->protocol = eth_type_trans(skb, ndev); + + dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); + + /* Prepare the BD for next cycle */ + rx_buff->skb = netdev_alloc_skb_ip_align(ndev, + EMAC_BUFFER_SIZE); + if (unlikely(!rx_buff->skb)) { + stats->rx_errors++; + /* Because receive_skb is below, increment rx_dropped */ + stats->rx_dropped++; + continue; + } + + /* receive_skb only if new skb was allocated to avoid holes */ + netif_receive_skb(skb); + + addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, addr)) { + if (net_ratelimit()) + netdev_err(ndev, "cannot dma map\n"); + dev_kfree_skb(rx_buff->skb); + stats->rx_errors++; + continue; + } + dma_unmap_addr_set(rx_buff, addr, addr); + dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); + + rxbd->data = cpu_to_le32(addr); + + /* Make sure pointer to data buffer is set */ + wmb(); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); + } + + return work_done; +} + +/** + * arc_emac_poll - NAPI poll handler. + * @napi: Pointer to napi_struct structure. + * @budget: How many BDs to process on 1 call. + * + * returns: Number of processed BDs + */ +static int arc_emac_poll(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int work_done; + + arc_emac_tx_clean(ndev); + + work_done = arc_emac_rx(ndev, budget); + if (work_done < budget) { + napi_complete(napi); + arc_reg_or(priv, R_ENABLE, RXINT_MASK); + } + + return work_done; +} + +/** + * arc_emac_intr - Global interrupt handler for EMAC. + * @irq: irq number. + * @dev_instance: device instance. + * + * returns: IRQ_HANDLED for all cases. + * + * ARC EMAC has only 1 interrupt line, and depending on bits raised in + * STATUS register we may tell what is a reason for interrupt to fire. + */ +static irqreturn_t arc_emac_intr(int irq, void *dev_instance) +{ + struct net_device *ndev = dev_instance; + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats; + unsigned int status; + + status = arc_reg_get(priv, R_STATUS); + status &= ~MDIO_MASK; + + /* Reset all flags except "MDIO complete" */ + arc_reg_set(priv, R_STATUS, status); + + if (status & RXINT_MASK) { + if (likely(napi_schedule_prep(&priv->napi))) { + arc_reg_clr(priv, R_ENABLE, RXINT_MASK); + __napi_schedule(&priv->napi); + } + } + + if (status & ERR_MASK) { + /* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding + * 8-bit error counter overrun. 
+ */ + + if (status & MSER_MASK) { + stats->rx_missed_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXCR_MASK) { + stats->rx_crc_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXFR_MASK) { + stats->rx_frame_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXFL_MASK) { + stats->rx_over_errors += 0x100; + stats->rx_errors += 0x100; + } + } + + return IRQ_HANDLED; +} + +/** + * arc_emac_open - Open the network device. + * @ndev: Pointer to the network device. + * + * returns: 0, on success or non-zero error value on failure. + * + * This function sets the MAC address, requests and enables an IRQ + * for the EMAC device and starts the Tx queue. + * It also connects to the phy device. + */ +static int arc_emac_open(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct phy_device *phy_dev = priv->phy_dev; + int i; + + phy_dev->autoneg = AUTONEG_ENABLE; + phy_dev->speed = 0; + phy_dev->duplex = 0; + phy_dev->advertising = phy_dev->supported; + + if (priv->max_speed > 100) { + phy_dev->advertising &= PHY_GBIT_FEATURES; + } else if (priv->max_speed <= 100) { + phy_dev->advertising &= PHY_BASIC_FEATURES; + if (priv->max_speed <= 10) { + phy_dev->advertising &= ~SUPPORTED_100baseT_Half; + phy_dev->advertising &= ~SUPPORTED_100baseT_Full; + } + } + + priv->last_rx_bd = 0; + + /* Allocate and set buffers for Rx BD's */ + for (i = 0; i < RX_BD_NUM; i++) { + dma_addr_t addr; + unsigned int *last_rx_bd = &priv->last_rx_bd; + struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; + struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; + + rx_buff->skb = netdev_alloc_skb_ip_align(ndev, + EMAC_BUFFER_SIZE); + if (unlikely(!rx_buff->skb)) + return -ENOMEM; + + addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, addr)) { + netdev_err(ndev, "cannot dma map\n"); + dev_kfree_skb(rx_buff->skb); + return -ENOMEM; + } + dma_unmap_addr_set(rx_buff, addr, addr); + dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); + + rxbd->data = cpu_to_le32(addr); + + /* Make sure pointer to data buffer is set */ + wmb(); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); + + *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; + } + + /* Clean Tx BD's */ + memset(priv->txbd, 0, TX_RING_SZ); + + /* Initialize logical address filter */ + arc_reg_set(priv, R_LAFL, 0); + arc_reg_set(priv, R_LAFH, 0); + + /* Set BD ring pointers for device side */ + arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma); + arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); + + /* Enable interrupts */ + arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); + + /* Set CONTROL */ + arc_reg_set(priv, R_CTRL, + (RX_BD_NUM << 24) | /* RX BD table length */ + (TX_BD_NUM << 16) | /* TX BD table length */ + TXRN_MASK | RXRN_MASK); + + napi_enable(&priv->napi); + + /* Enable EMAC */ + arc_reg_or(priv, R_CTRL, EN_MASK); + + phy_start_aneg(priv->phy_dev); + + netif_start_queue(ndev); + + return 0; +} + +/** + * arc_emac_stop - Close the network device. + * @ndev: Pointer to the network device. + * + * This function stops the Tx queue, disables interrupts and frees the IRQ for + * the EMAC device. + * It also disconnects the PHY device associated with the EMAC device. 
+ */
+static int arc_emac_stop(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+
+	napi_disable(&priv->napi);
+	netif_stop_queue(ndev);
+
+	/* Disable interrupts */
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+
+	/* Disable EMAC */
+	arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+	return 0;
+}
+
+/**
+ * arc_emac_stats - Get system network statistics.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Returns the address of the device statistics structure.
+ * Statistics are updated in interrupt handler.
+ */
+static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats;
+	unsigned long miss, rxerr;
+	u8 rxcrc, rxfram, rxoflow;
+
+	rxerr = arc_reg_get(priv, R_RXERR);
+	miss = arc_reg_get(priv, R_MISS);
+
+	rxcrc = rxerr;
+	rxfram = rxerr >> 8;
+	rxoflow = rxerr >> 16;
+
+	stats->rx_errors += miss;
+	stats->rx_errors += rxcrc + rxfram + rxoflow;
+
+	stats->rx_over_errors += rxoflow;
+	stats->rx_frame_errors += rxfram;
+	stats->rx_crc_errors += rxcrc;
+	stats->rx_missed_errors += miss;
+
+	return stats;
+}
+
+/**
+ * arc_emac_tx - Starts the data transmission.
+ * @skb: sk_buff pointer that contains data to be transmitted.
+ * @ndev: Pointer to net_device structure.
+ *
+ * returns: NETDEV_TX_OK, on success
+ *          NETDEV_TX_BUSY, if any of the descriptors are not free.
+ *
+ * This function is invoked from upper layers to initiate transmission.
+ */
+static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	unsigned int len, *txbd_curr = &priv->txbd_curr;
+	struct net_device_stats *stats = &priv->stats;
+	__le32 *info = &priv->txbd[*txbd_curr].info;
+	dma_addr_t addr;
+
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
+	len = max_t(unsigned int, ETH_ZLEN, skb->len);
+
+	/* EMAC still holds this buffer in its possession.
+	 * CPU must not modify this buffer descriptor
+	 */
+	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
+			      DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+		stats->tx_dropped++;
+		stats->tx_errors++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
+	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
+
+	priv->tx_buff[*txbd_curr].skb = skb;
+	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
+
+	/* Make sure pointer to data buffer is set */
+	wmb();
+
+	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
+
+	/* Increment index to point to the next BD */
+	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
+
+	/* Get "info" of the next BD */
+	info = &priv->txbd[*txbd_curr].info;
+
+	/* Check if Tx BD ring is full - next BD is still owned by EMAC */
+	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+		netif_stop_queue(ndev);
+
+	arc_reg_set(priv, R_STATUS, TXPL_MASK);
+
+	skb_tx_timestamp(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * arc_emac_set_address - Set the MAC address for this device.
+ * @ndev: Pointer to net_device structure.
+ * @p: 6 byte address to be written as MAC address.
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * returns: -EBUSY if the net device is busy or 0 if the address is set
+ *          successfully.
+ */ +static int arc_emac_set_address(struct net_device *ndev, void *p) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = p; + unsigned int addr_low, addr_hi; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); + addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); + + arc_reg_set(priv, R_ADDRL, addr_low); + arc_reg_set(priv, R_ADDRH, addr_hi); + + return 0; +} + +static const struct net_device_ops arc_emac_netdev_ops = { + .ndo_open = arc_emac_open, + .ndo_stop = arc_emac_stop, + .ndo_start_xmit = arc_emac_tx, + .ndo_set_mac_address = arc_emac_set_address, + .ndo_get_stats = arc_emac_stats, +}; + +static int arc_emac_probe(struct platform_device *pdev) +{ + struct resource res_regs, res_irq; + struct device_node *phy_node; + struct arc_emac_priv *priv; + struct net_device *ndev; + const char *mac_addr; + unsigned int id, clock_frequency; + int err; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* Get PHY from device tree */ + phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0); + if (!phy_node) { + dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n"); + return -ENODEV; + } + + /* Get EMAC registers base address from device tree */ + err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs); + if (err) { + dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n"); + return -ENODEV; + } + + /* Get CPU clock frequency from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", + &clock_frequency)) { + dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n"); + return -EINVAL; + } + + /* Get IRQ from device tree */ + err = of_irq_to_resource(pdev->dev.of_node, 0, &res_irq); + if (!err) { + dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n"); + return -ENODEV; + } + + ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->netdev_ops = &arc_emac_netdev_ops; + ndev->ethtool_ops = &arc_emac_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + /* FIXME :: no multicast support yet */ + ndev->flags &= ~IFF_MULTICAST; + + priv = netdev_priv(ndev); + priv->dev = &pdev->dev; + priv->ndev = ndev; + + priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out; + } + dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); + + id = arc_reg_get(priv, R_ID); + + /* Check for EMAC revision 5 or 7, magic number */ + if (!(id == 0x0005fd02 || id == 0x0007fd02)) { + dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); + err = -ENODEV; + goto out; + } + dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); + + /* Set poll rate so that it polls every 1 ms */ + arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); + + /* Get max speed of operation from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "max-speed", + &priv->max_speed)) { + dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n"); + err = -EINVAL; + goto out; + } + + ndev->irq = res_irq.start; + dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq); + + /* Register interrupt handler for device */ + err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0, + ndev->name, ndev); + if (err) { + dev_err(&pdev->dev, "could 
not allocate IRQ\n"); + goto out; + } + + /* Get MAC address from device tree */ + mac_addr = of_get_mac_address(pdev->dev.of_node); + + if (!mac_addr || !is_valid_ether_addr(mac_addr)) + eth_hw_addr_random(ndev); + else + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + + dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); + + /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ + priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ, + &priv->rxbd_dma, GFP_KERNEL); + + if (!priv->rxbd) { + dev_err(&pdev->dev, "failed to allocate data buffers\n"); + err = -ENOMEM; + goto out; + } + + priv->txbd = priv->rxbd + RX_BD_NUM; + + priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ; + dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", + (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma); + + err = arc_mdio_probe(pdev, priv); + if (err) { + dev_err(&pdev->dev, "failed to probe MII bus\n"); + goto out; + } + + priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (!priv->phy_dev) { + dev_err(&pdev->dev, "of_phy_connect() failed\n"); + err = -ENODEV; + goto out; + } + + dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", + priv->phy_dev->drv->name, priv->phy_dev->phy_id); + + netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); + + err = register_netdev(ndev); + if (err) { + netif_napi_del(&priv->napi); + dev_err(&pdev->dev, "failed to register network device\n"); + goto out; + } + + return 0; + +out: + free_netdev(ndev); + return err; +} + +static int arc_emac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct arc_emac_priv *priv = netdev_priv(ndev); + + phy_disconnect(priv->phy_dev); + priv->phy_dev = NULL; + arc_mdio_remove(priv); + unregister_netdev(ndev); + netif_napi_del(&priv->napi); + free_netdev(ndev); + + return 0; +} + +static const struct of_device_id arc_emac_dt_ids[] = { + { .compatible = "snps,arc-emac" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, arc_emac_dt_ids); + +static struct platform_driver arc_emac_driver = { + .probe = arc_emac_probe, + .remove = arc_emac_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = arc_emac_dt_ids, + }, +}; + +module_platform_driver(arc_emac_driver); + +MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>"); +MODULE_DESCRIPTION("ARC EMAC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c new file mode 100644 index 000000000000..26ba2423f33a --- /dev/null +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * MDIO implementation for ARC EMAC + */ + +#include <linux/delay.h> +#include <linux/of_mdio.h> +#include <linux/platform_device.h> + +#include "emac.h" + +/* Number of seconds we wait for "MDIO complete" flag to appear */ +#define ARC_MDIO_COMPLETE_POLL_COUNT 1 + +/** + * arc_mdio_complete_wait - Waits until MDIO transaction is completed. + * @priv: Pointer to ARC EMAC private data structure. + * + * returns: 0 on success, -ETIMEDOUT on a timeout. 
+ */
+static int arc_mdio_complete_wait(struct arc_emac_priv *priv)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARC_MDIO_COMPLETE_POLL_COUNT * 40; i++) {
+		unsigned int status = arc_reg_get(priv, R_STATUS);
+
+		status &= MDIO_MASK;
+
+		if (status) {
+			/* Reset "MDIO complete" flag */
+			arc_reg_set(priv, R_STATUS, status);
+			return 0;
+		}
+
+		msleep(25);
+	}
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * arc_mdio_read - MDIO interface read function.
+ * @bus: Pointer to MII bus structure.
+ * @phy_addr: Address of the PHY device.
+ * @reg_num: PHY register to read.
+ *
+ * returns: The register contents on success, -ETIMEDOUT on a timeout.
+ *
+ * Reads the contents of the requested register from the requested PHY
+ * address.
+ */
+static int arc_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
+{
+	struct arc_emac_priv *priv = bus->priv;
+	unsigned int value;
+	int error;
+
+	arc_reg_set(priv, R_MDIO,
+		    0x60020000 | (phy_addr << 23) | (reg_num << 18));
+
+	error = arc_mdio_complete_wait(priv);
+	if (error < 0)
+		return error;
+
+	value = arc_reg_get(priv, R_MDIO) & 0xffff;
+
+	dev_dbg(priv->dev, "arc_mdio_read(phy_addr=%i, reg_num=%x) = %x\n",
+		phy_addr, reg_num, value);
+
+	return value;
+}
+
+/**
+ * arc_mdio_write - MDIO interface write function.
+ * @bus: Pointer to MII bus structure.
+ * @phy_addr: Address of the PHY device.
+ * @reg_num: PHY register to write to.
+ * @value: Value to be written into the register.
+ *
+ * returns: 0 on success, -ETIMEDOUT on a timeout.
+ *
+ * Writes the value to the requested register.
+ */
+static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
+			  int reg_num, u16 value)
+{
+	struct arc_emac_priv *priv = bus->priv;
+
+	dev_dbg(priv->dev,
+		"arc_mdio_write(phy_addr=%i, reg_num=%x, value=%x)\n",
+		phy_addr, reg_num, value);
+
+	arc_reg_set(priv, R_MDIO,
+		    0x50020000 | (phy_addr << 23) | (reg_num << 18) | value);
+
+	return arc_mdio_complete_wait(priv);
+}
+
+/**
+ * arc_mdio_probe - MDIO probe function.
+ * @pdev: Pointer to platform device.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: 0 on success, -ENOMEM when mdiobus_alloc
+ * (to allocate memory for MII bus structure) fails.
+ *
+ * Sets up and registers the MDIO interface.
+ */
+int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv)
+{
+	struct mii_bus *bus;
+	int error;
+
+	bus = mdiobus_alloc();
+	if (!bus)
+		return -ENOMEM;
+
+	priv->bus = bus;
+	bus->priv = priv;
+	bus->parent = priv->dev;
+	bus->name = "Synopsys MII Bus";
+	bus->read = &arc_mdio_read;
+	bus->write = &arc_mdio_write;
+
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+	error = of_mdiobus_register(bus, pdev->dev.of_node);
+	if (error) {
+		dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
+		mdiobus_free(bus);
+		return error;
+	}
+
+	return 0;
+}
+
+/**
+ * arc_mdio_remove - MDIO remove function.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * Unregisters the MDIO and frees any associated memory for MII bus.
+ */ +int arc_mdio_remove(struct arc_emac_priv *priv) +{ + mdiobus_unregister(priv->bus); + mdiobus_free(priv->bus); + priv->bus = NULL; + + return 0; +} diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index ad6aa1e98348..58ad37c733bc 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -22,7 +22,6 @@ config ATL2 tristate "Atheros L2 Fast Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros L2 fast ethernet adapter. @@ -34,7 +33,6 @@ config ATL1 tristate "Atheros/Attansic L1 Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros/Attansic L1 gigabit ethernet @@ -47,7 +45,6 @@ config ATL1E tristate "Atheros L1E Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros L1E gigabit ethernet adapter. @@ -59,7 +56,6 @@ config ATL1C tristate "Atheros L1C Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros L1C gigabit ethernet adapter. @@ -71,7 +67,6 @@ config ALX tristate "Qualcomm Atheros AR816x/AR817x support" depends on PCI select CRC32 - select NET_CORE select MDIO help This driver supports the Qualcomm Atheros L1F ethernet adapter, diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 50b3ae2b143d..d71103dbf2cd 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -85,16 +85,16 @@ struct alx_priv { struct { dma_addr_t dma; void *virt; - int size; + unsigned int size; } descmem; /* protect int_mask updates */ spinlock_t irq_lock; u32 int_mask; - int tx_ringsz; - int rx_ringsz; - int rxbuf_size; + unsigned int tx_ringsz; + unsigned int rx_ringsz; + unsigned int rxbuf_size; struct napi_struct napi; struct alx_tx_queue txq; diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 6fa2aec2bc81..45b36507abc1 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -46,21 +46,37 @@ #include "reg.h" #include "hw.h" +static u32 alx_get_supported_speeds(struct alx_hw *hw) +{ + u32 supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full; + + if (alx_hw_giga(hw)) + supported |= SUPPORTED_1000baseT_Full; + + BUILD_BUG_ON(SUPPORTED_10baseT_Half != ADVERTISED_10baseT_Half); + BUILD_BUG_ON(SUPPORTED_10baseT_Full != ADVERTISED_10baseT_Full); + BUILD_BUG_ON(SUPPORTED_100baseT_Half != ADVERTISED_100baseT_Half); + BUILD_BUG_ON(SUPPORTED_100baseT_Full != ADVERTISED_100baseT_Full); + BUILD_BUG_ON(SUPPORTED_1000baseT_Full != ADVERTISED_1000baseT_Full); + + return supported; +} static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; - ecmd->supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | + ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | - SUPPORTED_Pause; + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; if (alx_hw_giga(hw)) ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported |= alx_get_supported_speeds(hw); ecmd->advertising = ADVERTISED_TP; if (hw->adv_cfg & ADVERTISED_Autoneg) @@ -68,6 +84,7 @@ static 
int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->port = PORT_TP; ecmd->phy_address = 0; + if (hw->adv_cfg & ADVERTISED_Autoneg) ecmd->autoneg = AUTONEG_ENABLE; else @@ -85,14 +102,8 @@ static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } } - if (hw->link_speed != SPEED_UNKNOWN) { - ethtool_cmd_speed_set(ecmd, - hw->link_speed - hw->link_speed % 10); - ecmd->duplex = hw->link_speed % 10; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } + ethtool_cmd_speed_set(ecmd, hw->link_speed); + ecmd->duplex = hw->duplex; return 0; } @@ -106,28 +117,15 @@ static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ASSERT_RTNL(); if (ecmd->autoneg == AUTONEG_ENABLE) { - if (ecmd->advertising & ADVERTISED_1000baseT_Half) + if (ecmd->advertising & ~alx_get_supported_speeds(hw)) return -EINVAL; adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; } else { - int speed = ethtool_cmd_speed(ecmd); - - switch (speed + ecmd->duplex) { - case SPEED_10 + DUPLEX_HALF: - adv_cfg = ADVERTISED_10baseT_Half; - break; - case SPEED_10 + DUPLEX_FULL: - adv_cfg = ADVERTISED_10baseT_Full; - break; - case SPEED_100 + DUPLEX_HALF: - adv_cfg = ADVERTISED_100baseT_Half; - break; - case SPEED_100 + DUPLEX_FULL: - adv_cfg = ADVERTISED_100baseT_Full; - break; - default: + adv_cfg = alx_speed_to_ethadv(ethtool_cmd_speed(ecmd), + ecmd->duplex); + + if (!adv_cfg || adv_cfg == ADVERTISED_1000baseT_Full) return -EINVAL; - } } hw->adv_cfg = adv_cfg; @@ -140,21 +138,10 @@ static void alx_get_pauseparam(struct net_device *netdev, struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; - if (hw->flowctrl & ALX_FC_ANEG && - hw->adv_cfg & ADVERTISED_Autoneg) - pause->autoneg = AUTONEG_ENABLE; - else - pause->autoneg = AUTONEG_DISABLE; - - if (hw->flowctrl & ALX_FC_TX) - pause->tx_pause = 1; - else - pause->tx_pause = 0; - - if (hw->flowctrl & ALX_FC_RX) - pause->rx_pause = 1; - else - pause->rx_pause = 0; + pause->autoneg = !!(hw->flowctrl & ALX_FC_ANEG && + hw->adv_cfg & ADVERTISED_Autoneg); + pause->tx_pause = !!(hw->flowctrl & ALX_FC_TX); + pause->rx_pause = !!(hw->flowctrl & ALX_FC_RX); } @@ -187,7 +174,8 @@ static int alx_set_pauseparam(struct net_device *netdev, if (reconfig_phy) { err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); - return err; + if (err) + return err; } /* flow control on mac */ @@ -213,60 +201,12 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data) alx->msg_enable = data; } -static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - wol->supported = WAKE_MAGIC | WAKE_PHY; - wol->wolopts = 0; - - if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) - wol->wolopts |= WAKE_MAGIC; - if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) - wol->wolopts |= WAKE_PHY; -} - -static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | - WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) - return -EOPNOTSUPP; - - hw->sleep_ctrl = 0; - - if (wol->wolopts & WAKE_MAGIC) - hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC; - if (wol->wolopts & WAKE_PHY) - hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY; - - device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl); - - return 0; -} - -static void alx_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - 
struct alx_priv *alx = netdev_priv(netdev); - - strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev), - sizeof(drvinfo->bus_info)); -} - const struct ethtool_ops alx_ethtool_ops = { .get_settings = alx_get_settings, .set_settings = alx_set_settings, .get_pauseparam = alx_get_pauseparam, .set_pauseparam = alx_set_pauseparam, - .get_drvinfo = alx_get_drvinfo, .get_msglevel = alx_get_msglevel, .set_msglevel = alx_set_msglevel, - .get_wol = alx_get_wol, - .set_wol = alx_set_wol, .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index 220a16ad0e49..1e8c24a3cb4e 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -282,8 +282,8 @@ static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr) mac1 = alx_read_mem32(hw, ALX_STAD1); /* addr should be big-endian */ - *(__be32 *)(addr + 2) = cpu_to_be32(mac0); - *(__be16 *)addr = cpu_to_be16(mac1); + put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2)); + put_unaligned(cpu_to_be16(mac1), (__be16 *)addr); return is_valid_ether_addr(addr); } @@ -326,22 +326,12 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) u32 val; /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ - val = be32_to_cpu(*(__be32 *)(addr + 2)); + val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2))); alx_write_mem32(hw, ALX_STAD0, val); - val = be16_to_cpu(*(__be16 *)addr); + val = be16_to_cpu(get_unaligned((__be16 *)addr)); alx_write_mem32(hw, ALX_STAD1, val); } -static void alx_enable_osc(struct alx_hw *hw) -{ - u32 val; - - /* rising edge */ - val = alx_read_mem32(hw, ALX_MISC); - alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN); - alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); -} - static void alx_reset_osc(struct alx_hw *hw, u8 rev) { u32 val, val2; @@ -624,12 +614,12 @@ void alx_start_mac(struct alx_hw *hw) alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); mac = hw->rx_ctrl; - if (hw->link_speed % 10 == DUPLEX_FULL) + if (hw->duplex == DUPLEX_FULL) mac |= ALX_MAC_CTRL_FULLD; else mac &= ~ALX_MAC_CTRL_FULLD; ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, - hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 : + hw->link_speed == SPEED_1000 ? 
ALX_MAC_CTRL_SPEED_1000 : ALX_MAC_CTRL_SPEED_10_100); mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; hw->rx_ctrl = mac; @@ -790,28 +780,22 @@ void alx_post_phy_link(struct alx_hw *hw) u16 phy_val, len, agc; u8 revid = alx_hw_revision(hw); bool adj_th = revid == ALX_REV_B0; - int speed; - - if (hw->link_speed == SPEED_UNKNOWN) - speed = SPEED_UNKNOWN; - else - speed = hw->link_speed - hw->link_speed % 10; if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) return; /* 1000BT/AZ, wrong cable length */ - if (speed != SPEED_UNKNOWN) { + if (hw->link_speed != SPEED_UNKNOWN) { alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, &phy_val); len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); - if ((speed == SPEED_1000 && + if ((hw->link_speed == SPEED_1000 && (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || - (speed == SPEED_100 && + (hw->link_speed == SPEED_100 && (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, @@ -831,10 +815,10 @@ void alx_post_phy_link(struct alx_hw *hw) /* threshold adjust */ if (adj_th && hw->lnk_patch) { - if (speed == SPEED_100) { + if (hw->link_speed == SPEED_100) { alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, ALX_MSE16DB_UP); - } else if (speed == SPEED_1000) { + } else if (hw->link_speed == SPEED_1000) { /* * Giga link threshold, raise the tolerance of * noise 50% @@ -864,66 +848,6 @@ void alx_post_phy_link(struct alx_hw *hw) } } - -/* NOTE: - * 1. phy link must be established before calling this function - * 2. wol option (pattern,magic,link,etc.) is configed before call it. - */ -int alx_pre_suspend(struct alx_hw *hw, int speed) -{ - u32 master, mac, phy, val; - int err = 0; - - master = alx_read_mem32(hw, ALX_MASTER); - master &= ~ALX_MASTER_PCLKSEL_SRDS; - mac = hw->rx_ctrl; - /* 10/100 half */ - ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100); - mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); - - phy = alx_read_mem32(hw, ALX_PHY_CTRL); - phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS); - phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE | - ALX_PHY_CTRL_HIB_EN; - - /* without any activity */ - if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) { - err = alx_write_phy_reg(hw, ALX_MII_IER, 0); - if (err) - return err; - phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN; - } else { - if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS)) - mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN; - if (hw->sleep_ctrl & ALX_SLEEP_CIFS) - mac |= ALX_MAC_CTRL_TX_EN; - if (speed % 10 == DUPLEX_FULL) - mac |= ALX_MAC_CTRL_FULLD; - if (speed >= SPEED_1000) - ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, - ALX_MAC_CTRL_SPEED_1000); - phy |= ALX_PHY_CTRL_DSPRST_OUT; - err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, - ALX_MIIEXT_S3DIG10, - ALX_MIIEXT_S3DIG10_SL); - if (err) - return err; - } - - alx_enable_osc(hw); - hw->rx_ctrl = mac; - alx_write_mem32(hw, ALX_MASTER, master); - alx_write_mem32(hw, ALX_MAC_CTRL, mac); - alx_write_mem32(hw, ALX_PHY_CTRL, phy); - - /* set val of PDLL D3PLLOFF */ - val = alx_read_mem32(hw, ALX_PDLL_TRNS1); - val |= ALX_PDLL_TRNS1_D3PLLOFF_EN; - alx_write_mem32(hw, ALX_PDLL_TRNS1, val); - - return 0; -} - bool alx_phy_configured(struct alx_hw *hw) { u32 cfg, hw_cfg; @@ -938,7 +862,7 @@ bool alx_phy_configured(struct alx_hw *hw) return cfg == hw_cfg; } -int alx_get_phy_link(struct alx_hw *hw, int *speed) 
+int alx_read_phy_link(struct alx_hw *hw) { struct pci_dev *pdev = hw->pdev; u16 bmsr, giga; @@ -953,7 +877,8 @@ int alx_get_phy_link(struct alx_hw *hw, int *speed) return err; if (!(bmsr & BMSR_LSTATUS)) { - *speed = SPEED_UNKNOWN; + hw->link_speed = SPEED_UNKNOWN; + hw->duplex = DUPLEX_UNKNOWN; return 0; } @@ -967,20 +892,20 @@ int alx_get_phy_link(struct alx_hw *hw, int *speed) switch (giga & ALX_GIGA_PSSR_SPEED) { case ALX_GIGA_PSSR_1000MBS: - *speed = SPEED_1000; + hw->link_speed = SPEED_1000; break; case ALX_GIGA_PSSR_100MBS: - *speed = SPEED_100; + hw->link_speed = SPEED_100; break; case ALX_GIGA_PSSR_10MBS: - *speed = SPEED_10; + hw->link_speed = SPEED_10; break; default: goto wrong_speed; } - *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF; - return 1; + hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF; + return 0; wrong_speed: dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); @@ -995,26 +920,6 @@ int alx_clear_phy_intr(struct alx_hw *hw) return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); } -int alx_config_wol(struct alx_hw *hw) -{ - u32 wol = 0; - int err = 0; - - /* turn on magic packet event */ - if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) - wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN; - - /* turn on link up event */ - if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) { - wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK; - /* only link up can wake up */ - err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP); - } - alx_write_mem32(hw, ALX_WOL0, wol); - - return err; -} - void alx_disable_rss(struct alx_hw *hw) { u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); @@ -1126,85 +1031,6 @@ void alx_configure_basic(struct alx_hw *hw) alx_write_mem32(hw, ALX_WRR, val); } -static inline u32 alx_speed_to_ethadv(int speed) -{ - switch (speed) { - case SPEED_1000 + DUPLEX_FULL: - return ADVERTISED_1000baseT_Full; - case SPEED_100 + DUPLEX_FULL: - return ADVERTISED_100baseT_Full; - case SPEED_100 + DUPLEX_HALF: - return ADVERTISED_10baseT_Half; - case SPEED_10 + DUPLEX_FULL: - return ADVERTISED_10baseT_Full; - case SPEED_10 + DUPLEX_HALF: - return ADVERTISED_10baseT_Half; - default: - return 0; - } -} - -int alx_select_powersaving_speed(struct alx_hw *hw, int *speed) -{ - int i, err, spd; - u16 lpa; - - err = alx_get_phy_link(hw, &spd); - if (err < 0) - return err; - - if (spd == SPEED_UNKNOWN) - return 0; - - err = alx_read_phy_reg(hw, MII_LPA, &lpa); - if (err) - return err; - - if (!(lpa & LPA_LPACK)) { - *speed = spd; - return 0; - } - - if (lpa & LPA_10FULL) - *speed = SPEED_10 + DUPLEX_FULL; - else if (lpa & LPA_10HALF) - *speed = SPEED_10 + DUPLEX_HALF; - else if (lpa & LPA_100FULL) - *speed = SPEED_100 + DUPLEX_FULL; - else - *speed = SPEED_100 + DUPLEX_HALF; - - if (*speed != spd) { - err = alx_write_phy_reg(hw, ALX_MII_IER, 0); - if (err) - return err; - err = alx_setup_speed_duplex(hw, - alx_speed_to_ethadv(*speed) | - ADVERTISED_Autoneg, - ALX_FC_ANEG | ALX_FC_RX | - ALX_FC_TX); - if (err) - return err; - - /* wait for linkup */ - for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { - int speed2; - - msleep(100); - - err = alx_get_phy_link(hw, &speed2); - if (err < 0) - return err; - if (speed2 != SPEED_UNKNOWN) - break; - } - if (i == ALX_MAX_SETUP_LNK_CYCLE) - return -ETIMEDOUT; - } - - return 0; -} - bool alx_get_phy_info(struct alx_hw *hw) { u16 devs1, devs2; diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h index 65e723d2172a..96f3b4381e17 100644 --- a/drivers/net/ethernet/atheros/alx/hw.h +++ 
b/drivers/net/ethernet/atheros/alx/hw.h
@@ -412,12 +412,11 @@ struct alx_hw {
 	u32 smb_timer;
 	/* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
 	int link_speed;
+	u8 duplex;
 
 	/* auto-neg advertisement or force mode config */
-	u32 adv_cfg;
 	u8 flowctrl;
-
-	u32 sleep_ctrl;
+	u32 adv_cfg;
 
 	spinlock_t mdio_lock;
 	struct mdio_if_info mdio;
@@ -478,14 +477,12 @@ void alx_reset_pcie(struct alx_hw *hw);
 void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
 int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
 void alx_post_phy_link(struct alx_hw *hw);
-int alx_pre_suspend(struct alx_hw *hw, int speed);
 int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
 int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
 int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
 int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
-int alx_get_phy_link(struct alx_hw *hw, int *speed);
+int alx_read_phy_link(struct alx_hw *hw);
 int alx_clear_phy_intr(struct alx_hw *hw);
-int alx_config_wol(struct alx_hw *hw);
 void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
 void alx_start_mac(struct alx_hw *hw);
 int alx_reset_mac(struct alx_hw *hw);
@@ -493,7 +490,21 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
 bool alx_phy_configured(struct alx_hw *hw);
 void alx_configure_basic(struct alx_hw *hw);
 void alx_disable_rss(struct alx_hw *hw);
-int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
 bool alx_get_phy_info(struct alx_hw *hw);
 
+static inline u32 alx_speed_to_ethadv(int speed, u8 duplex)
+{
+	if (speed == SPEED_1000 && duplex == DUPLEX_FULL)
+		return ADVERTISED_1000baseT_Full;
+	if (speed == SPEED_100 && duplex == DUPLEX_FULL)
+		return ADVERTISED_100baseT_Full;
+	if (speed == SPEED_100 && duplex == DUPLEX_HALF)
+		return ADVERTISED_100baseT_Half;
+	if (speed == SPEED_10 && duplex == DUPLEX_FULL)
+		return ADVERTISED_10baseT_Full;
+	if (speed == SPEED_10 && duplex == DUPLEX_HALF)
+		return ADVERTISED_10baseT_Half;
+	return 0;
+}
+
 #endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 418de8b13165..027398ebbba6 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -706,12 +706,12 @@ static int alx_init_sw(struct alx_priv *alx)
 	alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
 	alx->tx_ringsz = 256;
 	alx->rx_ringsz = 512;
-	hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
 	hw->imt = 200;
 	alx->int_mask = ALX_ISR_MISC;
 	hw->dma_chnl = hw->max_dma_chnl;
 	hw->ith_tpd = alx->tx_ringsz / 3;
 	hw->link_speed = SPEED_UNKNOWN;
+	hw->duplex = DUPLEX_UNKNOWN;
 	hw->adv_cfg = ADVERTISED_Autoneg |
 		      ADVERTISED_10baseT_Half |
 		      ADVERTISED_10baseT_Full |
@@ -758,6 +758,7 @@ static void alx_halt(struct alx_priv *alx)
 	alx_netif_stop(alx);
 	hw->link_speed = SPEED_UNKNOWN;
+	hw->duplex = DUPLEX_UNKNOWN;
 
 	alx_reset_mac(hw);
 
@@ -869,18 +870,18 @@ static void __alx_stop(struct alx_priv *alx)
 	alx_free_rings(alx);
 }
 
-static const char *alx_speed_desc(u16 speed)
+static const char *alx_speed_desc(struct alx_hw *hw)
 {
-	switch (speed) {
-	case SPEED_1000 + DUPLEX_FULL:
+	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
+	case ADVERTISED_1000baseT_Full:
 		return "1 Gbps Full";
-	case SPEED_100 + DUPLEX_FULL:
+	case ADVERTISED_100baseT_Full:
 		return "100 Mbps Full";
-	case SPEED_100 + DUPLEX_HALF:
+	case ADVERTISED_100baseT_Half:
 		return "100 Mbps Half";
-	case SPEED_10 + DUPLEX_FULL:
+	case ADVERTISED_10baseT_Full:
 		return "10 Mbps Full";
-	case
SPEED_10 + DUPLEX_HALF: + case ADVERTISED_10baseT_Half: return "10 Mbps Half"; default: return "Unknown speed"; @@ -891,7 +892,8 @@ static void alx_check_link(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; unsigned long flags; - int speed, old_speed; + int old_speed; + u8 old_duplex; int err; /* clear PHY internal interrupt status, otherwise the main @@ -899,7 +901,9 @@ static void alx_check_link(struct alx_priv *alx) */ alx_clear_phy_intr(hw); - err = alx_get_phy_link(hw, &speed); + old_speed = hw->link_speed; + old_duplex = hw->duplex; + err = alx_read_phy_link(hw); if (err < 0) goto reset; @@ -908,15 +912,12 @@ static void alx_check_link(struct alx_priv *alx) alx_write_mem32(hw, ALX_IMR, alx->int_mask); spin_unlock_irqrestore(&alx->irq_lock, flags); - old_speed = hw->link_speed; - - if (old_speed == speed) + if (old_speed == hw->link_speed) return; - hw->link_speed = speed; - if (speed != SPEED_UNKNOWN) { + if (hw->link_speed != SPEED_UNKNOWN) { netif_info(alx, link, alx->dev, - "NIC Up: %s\n", alx_speed_desc(speed)); + "NIC Up: %s\n", alx_speed_desc(hw)); alx_post_phy_link(hw); alx_enable_aspm(hw, true, true); alx_start_mac(hw); @@ -959,65 +960,6 @@ static int alx_stop(struct net_device *netdev) return 0; } -static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) -{ - struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - struct alx_hw *hw = &alx->hw; - int err, speed; - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __alx_stop(alx); - -#ifdef CONFIG_PM_SLEEP - err = pci_save_state(pdev); - if (err) - return err; -#endif - - err = alx_select_powersaving_speed(hw, &speed); - if (err) - return err; - err = alx_clear_phy_intr(hw); - if (err) - return err; - err = alx_pre_suspend(hw, speed); - if (err) - return err; - err = alx_config_wol(hw); - if (err) - return err; - - *wol_en = false; - if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) { - netif_info(alx, wol, netdev, - "wol: ctrl=%X, speed=%X\n", - hw->sleep_ctrl, speed); - device_set_wakeup_enable(&pdev->dev, true); - *wol_en = true; - } - - pci_disable_device(pdev); - - return 0; -} - -static void alx_shutdown(struct pci_dev *pdev) -{ - int err; - bool wol_en; - - err = __alx_shutdown(pdev, &wol_en); - if (!err) { - pci_wake_from_d3(pdev, wol_en); - pci_set_power_state(pdev, PCI_D3hot); - } else { - dev_err(&pdev->dev, "shutdown fail %d\n", err); - } -} - static void alx_link_check(struct work_struct *work) { struct alx_priv *alx; @@ -1303,6 +1245,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); alx = netdev_priv(netdev); + spin_lock_init(&alx->hw.mdio_lock); + spin_lock_init(&alx->irq_lock); alx->dev = netdev; alx->hw.pdev = pdev; alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | @@ -1385,9 +1329,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&alx->link_check_wk, alx_link_check); INIT_WORK(&alx->reset_wk, alx_reset); - spin_lock_init(&alx->hw.mdio_lock); - spin_lock_init(&alx->irq_lock); - netif_carrier_off(netdev); err = register_netdev(netdev); @@ -1396,8 +1337,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_unmap; } - device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl); - netdev_info(netdev, "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", netdev->dev_addr); @@ -1442,22 +1381,12 @@ static void alx_remove(struct pci_dev *pdev) static int alx_suspend(struct device *dev) { struct pci_dev *pdev = 
to_pci_dev(dev); - int err; - bool wol_en; - - err = __alx_shutdown(pdev, &wol_en); - if (err) { - dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err); - return err; - } - - if (wol_en) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } + struct alx_priv *alx = pci_get_drvdata(pdev); + if (!netif_running(alx->dev)) + return 0; + netif_device_detach(alx->dev); + __alx_stop(alx); return 0; } @@ -1465,49 +1394,20 @@ static int alx_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - struct alx_hw *hw = &alx->hw; - int err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - hw->link_speed = SPEED_UNKNOWN; - alx->int_mask = ALX_ISR_MISC; - - alx_reset_pcie(hw); - alx_reset_phy(hw); - - err = alx_reset_mac(hw); - if (err) { - netif_err(alx, hw, alx->dev, - "resume:reset_mac fail %d\n", err); - return -EIO; - } - err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); - if (err) { - netif_err(alx, hw, alx->dev, - "resume:setup_speed_duplex fail %d\n", err); - return -EIO; - } - - if (netif_running(netdev)) { - err = __alx_open(alx, true); - if (err) - return err; - } - - netif_device_attach(netdev); - - return err; + if (!netif_running(alx->dev)) + return 0; + netif_device_attach(alx->dev); + return __alx_open(alx, true); } + +static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +#define ALX_PM_OPS (&alx_pm_ops) +#else +#define ALX_PM_OPS NULL #endif + static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -1550,8 +1450,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) } pci_set_master(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); alx_reset_pcie(hw); if (!alx_reset_mac(hw)) @@ -1587,13 +1485,6 @@ static const struct pci_error_handlers alx_err_handlers = { .resume = alx_pci_error_resume, }; -#ifdef CONFIG_PM_SLEEP -static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); -#define ALX_PM_OPS (&alx_pm_ops) -#else -#define ALX_PM_OPS NULL -#endif - static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, @@ -1611,7 +1502,6 @@ static struct pci_driver alx_driver = { .id_table = alx_pci_tbl, .probe = alx_probe, .remove = alx_remove, - .shutdown = alx_shutdown, .err_handler = &alx_err_handlers, .driver.pm = ALX_PM_OPS, }; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 0ba900762b13..786a87483298 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2755,27 +2755,4 @@ static struct pci_driver atl1c_driver = { .driver.pm = &atl1c_pm_ops, }; -/** - * atl1c_init_module - Driver Registration Routine - * - * atl1c_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1c_init_module(void) -{ - return pci_register_driver(&atl1c_driver); -} - -/** - * atl1c_exit_module - Driver Exit Cleanup Routine - * - * atl1c_exit_module is called just before the driver is removed - * from memory. 
- */ -static void __exit atl1c_exit_module(void) -{ - pci_unregister_driver(&atl1c_driver); -} - -module_init(atl1c_init_module); -module_exit(atl1c_exit_module); +module_pci_driver(atl1c_driver); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 0688bb82b442..1966444590f6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1665,8 +1665,8 @@ check_sum: return 0; } -static void atl1e_tx_map(struct atl1e_adapter *adapter, - struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +static int atl1e_tx_map(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { struct atl1e_tpd_desc *use_tpd = NULL; struct atl1e_tx_buffer *tx_buffer = NULL; @@ -1677,6 +1677,8 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, u16 nr_frags; u16 f; int segment; + int ring_start = adapter->tx_ring.next_to_use; + int ring_end; nr_frags = skb_shinfo(skb)->nr_frags; segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; @@ -1689,6 +1691,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, tx_buffer->length = map_len; tx_buffer->dma = pci_map_single(adapter->pdev, skb->data, hdr_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) + return -ENOSPC; + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); @@ -1715,6 +1720,22 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, tx_buffer->dma = pci_map_single(adapter->pdev, skb->data + mapped_len, map_len, PCI_DMA_TODEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + } + /* Reset the tx rings next pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); @@ -1750,6 +1771,23 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, (i * MAX_TX_BUF_LEN), tx_buffer->length, DMA_TO_DEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma, + tx_buffer->length, DMA_TO_DEVICE); + } + + /* Reset the ring next to use pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | @@ -1767,6 +1805,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, /* The last buffer info contain the skb address, so it will be free after unmap */ tx_buffer->skb = skb; + return 0; } static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, @@ -1834,10 +1873,15 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, return 
NETDEV_TX_OK; } - atl1e_tx_map(adapter, skb, tpd); + if (atl1e_tx_map(adapter, skb, tpd)) { + dev_kfree_skb_any(skb); + goto out; + } + atl1e_tx_queue(adapter, tpd_req, tpd); netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ +out: spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_OK; } @@ -2489,27 +2533,4 @@ static struct pci_driver atl1e_driver = { .err_handler = &atl1e_err_handler }; -/** - * atl1e_init_module - Driver Registration Routine - * - * atl1e_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1e_init_module(void) -{ - return pci_register_driver(&atl1e_driver); -} - -/** - * atl1e_exit_module - Driver Exit Cleanup Routine - * - * atl1e_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit atl1e_exit_module(void) -{ - pci_unregister_driver(&atl1e_driver); -} - -module_init(atl1e_init_module); -module_exit(atl1e_exit_module); +module_pci_driver(atl1e_driver); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index fa0915f3999b..538211d6f7d9 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3145,31 +3145,6 @@ static struct pci_driver atl1_driver = { .driver.pm = &atl1_pm_ops, }; -/** - * atl1_exit_module - Driver Exit Cleanup Routine - * - * atl1_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit atl1_exit_module(void) -{ - pci_unregister_driver(&atl1_driver); -} - -/** - * atl1_init_module - Driver Registration Routine - * - * atl1_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1_init_module(void) -{ - return pci_register_driver(&atl1_driver); -} - -module_init(atl1_init_module); -module_exit(atl1_exit_module); - struct atl1_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -3705,3 +3680,5 @@ static const struct ethtool_ops atl1_ethtool_ops = { .get_ethtool_stats = atl1_get_ethtool_stats, .get_sset_count = atl1_get_sset_count, }; + +module_pci_driver(atl1_driver); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 3e69b3f88099..52c96036dcc4 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -22,7 +22,6 @@ config B44 tristate "Broadcom 440x/47xx ethernet support" depends on SSB_POSSIBLE && HAS_DMA select SSB - select NET_CORE select MII ---help--- If you have a network (Ethernet) controller of this type, say Y @@ -54,7 +53,6 @@ config B44_PCI config BCM63XX_ENET tristate "Broadcom 63xx internal mac support" depends on BCM63XX - select NET_CORE select MII select PHYLIB help @@ -133,6 +131,7 @@ config BNX2X_SRIOV config BGMAC tristate "BCMA bus GBit core support" depends on BCMA_HOST_SOC && HAS_DMA + select PHYLIB ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. They can be found on BCM47xx SoCs and provide gigabit ethernet. 
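For reference, the module_pci_driver() conversions in the atl1c/atl1e/atl1 hunks above rely on a helper macro from <linux/pci.h>: it expands, via module_driver(), to essentially the same init/exit boilerplate the patches delete. Below is a rough sketch of the equivalent expansion for atl1c_driver; the generated names follow the macro's driver##_init/_exit pattern, so treat this as an illustration rather than the literal preprocessor output.

/* Approximate expansion of module_pci_driver(atl1c_driver) */
static int __init atl1c_driver_init(void)
{
	/* register with the PCI subsystem, exactly as the deleted
	 * atl1c_init_module() did */
	return pci_register_driver(&atl1c_driver);
}
module_init(atl1c_driver_init);

static void __exit atl1c_driver_exit(void)
{
	/* unregister on module unload */
	pci_unregister_driver(&atl1c_driver);
}
module_exit(atl1c_driver_exit);

The behavior is unchanged; the macro simply removes per-driver copies of identical registration code, which is why each of the three atheros drivers can drop about 25 lines here.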
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 0b3e23ec37f7..b1bcd4ba4744 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -41,8 +41,8 @@ static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); -/* io memory shared between all devices */ -static void __iomem *bcm_enet_shared_base; +/* io registers memory shared between all devices */ +static void __iomem *bcm_enet_shared_base[3]; /* * io helpers to access mac registers @@ -59,17 +59,76 @@ static inline void enet_writel(struct bcm_enet_priv *priv, } /* - * io helpers to access shared registers + * io helpers to access switch registers */ +static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(priv->base + off); +} + +static inline void enetsw_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, priv->base + off); +} + +static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readw(priv->base + off); +} + +static inline void enetsw_writew(struct bcm_enet_priv *priv, + u16 val, u32 off) +{ + bcm_writew(val, priv->base + off); +} + +static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readb(priv->base + off); +} + +static inline void enetsw_writeb(struct bcm_enet_priv *priv, + u8 val, u32 off) +{ + bcm_writeb(val, priv->base + off); +} + + +/* io helpers to access shared registers */ static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) { - return bcm_readl(bcm_enet_shared_base + off); + return bcm_readl(bcm_enet_shared_base[0] + off); } static inline void enet_dma_writel(struct bcm_enet_priv *priv, u32 val, u32 off) { - bcm_writel(val, bcm_enet_shared_base + off); + bcm_writel(val, bcm_enet_shared_base[0] + off); +} + +static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) +{ + return bcm_readl(bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); +} + +static inline void enet_dmac_writel(struct bcm_enet_priv *priv, + u32 val, u32 off, int chan) +{ + bcm_writel(val, bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); +} + +static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) +{ + return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); +} + +static inline void enet_dmas_writel(struct bcm_enet_priv *priv, + u32 val, u32 off, int chan) +{ + bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); } /* @@ -196,7 +255,6 @@ static int bcm_enet_refill_rx(struct net_device *dev) if (!skb) break; priv->rx_skb[desc_idx] = skb; - p = dma_map_single(&priv->pdev->dev, skb->data, priv->rx_skb_size, DMA_FROM_DEVICE); @@ -206,7 +264,7 @@ static int bcm_enet_refill_rx(struct net_device *dev) len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; len_stat |= DMADESC_OWNER_MASK; if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); priv->rx_dirty_desc = 0; } else { priv->rx_dirty_desc++; @@ -217,7 +275,10 @@ static int bcm_enet_refill_rx(struct net_device *dev) priv->rx_desc_count++; /* tell dma engine we allocated one buffer */ - enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (priv->dma_has_sram) + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else 
+ enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); } /* If rx ring is still empty, set a timer to try allocating @@ -293,13 +354,15 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) /* if the packet does not have start of packet _and_ * end of packet flag set, then just recycle it */ - if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { + if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != + (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { dev->stats.rx_dropped++; continue; } /* recycle packet if it's marked as bad */ - if (unlikely(len_stat & DMADESC_ERR_MASK)) { + if (!priv->enet_is_sw && + unlikely(len_stat & DMADESC_ERR_MASK)) { dev->stats.rx_errors++; if (len_stat & DMADESC_OVSIZE_MASK) @@ -353,8 +416,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) bcm_enet_refill_rx(dev); /* kick rx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); } return processed; @@ -429,10 +492,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) dev = priv->net_dev; /* ack interrupts */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ tx_work_done = bcm_enet_tx_reclaim(dev, 0); @@ -451,10 +514,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* restore rx/tx interrupt */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); return rx_work_done; } @@ -497,8 +560,8 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) priv = netdev_priv(dev); /* mask rx/tx interrupts */ - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); napi_schedule(&priv->napi); @@ -530,6 +593,26 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_unlock; } + /* pad small packets sent on a switch device */ + if (priv->enet_is_sw && skb->len < 64) { + int needed = 64 - skb->len; + char *data; + + if (unlikely(skb_tailroom(skb) < needed)) { + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); + if (!nskb) { + ret = NETDEV_TX_BUSY; + goto out_unlock; + } + dev_kfree_skb(skb); + skb = nskb; + } + data = skb_put(skb, needed); + memset(data, 0, needed); + } + /* point to the next available desc */ desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; priv->tx_skb[priv->tx_curr_desc] = skb; @@ -539,14 +622,14 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) DMA_TO_DEVICE); len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; - len_stat |= DMADESC_ESOP_MASK | + len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | DMADESC_APPEND_CRC | DMADESC_OWNER_MASK; priv->tx_curr_desc++; if (priv->tx_curr_desc == 
priv->tx_ring_size) { priv->tx_curr_desc = 0; - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); } priv->tx_desc_count--; @@ -557,8 +640,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); /* kick tx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->tx_chan); /* stop queue if no more desc available */ if (!priv->tx_desc_count) @@ -686,6 +769,9 @@ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) val &= ~ENET_RXCFG_ENFLOW_MASK; enet_writel(priv, val, ENET_RXCFG_REG); + if (!priv->dma_has_sram) + return; + /* tx flow control (pause frame generation) */ val = enet_dma_readl(priv, ENETDMA_CFG_REG); if (tx_en) @@ -833,8 +919,8 @@ static int bcm_enet_open(struct net_device *dev) /* mask all interrupts and request them */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); if (ret) @@ -909,8 +995,12 @@ static int bcm_enet_open(struct net_device *dev) priv->rx_curr_desc = 0; /* initialize flow control buffer allocation */ - enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, - ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (priv->dma_has_sram) + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMAC_BUFALLOC, priv->rx_chan); if (bcm_enet_refill_rx(dev)) { dev_err(kdev, "cannot allocate rx skb queue\n"); @@ -919,37 +1009,55 @@ static int bcm_enet_open(struct net_device *dev) } /* write rx & tx ring addresses */ - enet_dma_writel(priv, priv->rx_desc_dma, - ENETDMA_RSTART_REG(priv->rx_chan)); - enet_dma_writel(priv, priv->tx_desc_dma, - ENETDMA_RSTART_REG(priv->tx_chan)); + if (priv->dma_has_sram) { + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG, priv->rx_chan); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, priv->rx_desc_dma, + ENETDMAC_RSTART, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_desc_dma, + ENETDMAC_RSTART, priv->tx_chan); + } /* clear remaining state ram for rx & tx channel */ - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); + if (priv->dma_has_sram) { + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); + } /* set max rx/tx length */ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); 
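
The split this hunk codifies is easy to miss in the noise: the older ENET DMA engine keeps per-channel state in internal SRAM (reached through the third shared register range), while the newer engines used by the switch expose everything through per-channel registers, with offsets scaled by a channel width taken from platform data. A minimal sketch of the ring-base write under that split, assuming the three remapped ranges, the dma_has_sram flag and the register names used in this patch — sketch_write_ring_base itself is a hypothetical name that just inlines what the enet_dmas_writel()/enet_dmac_writel() pair above does:

/* Illustrative only: program a channel's descriptor ring base either
 * through DMA state RAM (shared range 2) or through the per-channel
 * ENETDMAC block (shared range 1), scaling by the probed channel width.
 */
static void sketch_write_ring_base(struct bcm_enet_priv *priv,
				   u32 dma_addr, int chan)
{
	if (priv->dma_has_sram)
		bcm_writel(dma_addr, bcm_enet_shared_base[2] +
			   ENETDMAS_RSTART_REG +
			   chan * priv->dma_chan_width);
	else
		bcm_writel(dma_addr, bcm_enet_shared_base[1] +
			   bcm63xx_enetdmacreg(ENETDMAC_RSTART) +
			   chan * priv->dma_chan_width);
}

The same dispatch recurs for buffer allocation, state-RAM clearing and flow control throughout bcm_enet_open() above, which is why the flag lives in bcm_enet_priv rather than being rechecked from platform data each time.
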
enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); /* set dma maximum burst len */ - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->rx_chan)); - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->tx_chan); /* set correct transmit fifo watermark */ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); /* set flow control low/high threshold to 1/3 / 2/3 */ - val = priv->rx_ring_size / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); - val = (priv->rx_ring_size * 2) / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + if (priv->dma_has_sram) { + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + } else { + enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); + } /* all set, enable mac and interrupts, start dma engine and * kick rx dma channel */ @@ -958,26 +1066,26 @@ static int bcm_enet_open(struct net_device *dev) val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); /* watch "mib counters about to overflow" interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); /* watch "packet transferred" interrupt in rx and tx */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); if (priv->has_phy) phy_start(priv->phydev); @@ -1057,14 +1165,14 @@ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) { int limit; - enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); + enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); limit = 1000; do { u32 val; - val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); - if (!(val & ENETDMA_CHANCFG_EN_MASK)) + val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); + if (!(val & ENETDMAC_CHANCFG_EN_MASK)) break; udelay(1); } while (limit--); @@ -1090,8 +1198,8 @@ static int bcm_enet_stop(struct net_device *dev) /* mask all interrupts */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); /* make sure 
no mib update is scheduled */ cancel_work_sync(&priv->mib_update_task); @@ -1328,6 +1436,20 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev, mutex_unlock(&priv->mib_update_lock); } +static int bcm_enet_nway_reset(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return genphy_restart_aneg(priv->phydev); + } + + return -EOPNOTSUPP; +} + static int bcm_enet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1470,6 +1592,7 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = { .get_strings = bcm_enet_get_strings, .get_sset_count = bcm_enet_get_sset_count, .get_ethtool_stats = bcm_enet_get_ethtool_stats, + .nway_reset = bcm_enet_nway_reset, .get_settings = bcm_enet_get_settings, .set_settings = bcm_enet_set_settings, .get_drvinfo = bcm_enet_get_drvinfo, @@ -1530,7 +1653,7 @@ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) * it's appended */ priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, - BCMENET_DMA_MAXBURST * 4); + priv->dma_maxburst * 4); return 0; } @@ -1621,7 +1744,7 @@ static int bcm_enet_probe(struct platform_device *pdev) /* stop if shared driver failed, assume driver->probe will be * called in the same order we register devices (correct ?) */ - if (!bcm_enet_shared_base) + if (!bcm_enet_shared_base[0]) return -ENODEV; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1637,6 +1760,9 @@ static int bcm_enet_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(dev); + priv->enet_is_sw = false; + priv->dma_maxburst = BCMENET_DMA_MAXBURST; + ret = compute_hw_mtu(priv, dev->mtu); if (ret) goto out; @@ -1687,6 +1813,11 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->pause_tx = pd->pause_tx; priv->force_duplex_full = pd->force_duplex_full; priv->force_speed_100 = pd->force_speed_100; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_desc_shift = pd->dma_desc_shift; } if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { @@ -1847,7 +1978,6 @@ static int bcm_enet_remove(struct platform_device *pdev) clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); - platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } @@ -1862,19 +1992,881 @@ struct platform_driver bcm63xx_enet_driver = { }; /* - * reserve & remap memory space shared between all macs + * switch mii access callbacks */ -static int bcm_enet_shared_probe(struct platform_device *pdev) +static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, + int ext, int phy_id, int location) { - struct resource *res; + u32 reg; + int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); + + reg = ENETSW_MDIOC_RD_MASK | + (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | + (location << ENETSW_MDIOC_REG_SHIFT); + + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; + + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + ret = enetsw_readw(priv, ENETSW_MDIOD_REG); + spin_unlock_bh(&priv->enetsw_mdio_lock); + return ret; +} + +static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv, + int ext, int phy_id, int location, + uint16_t data) +{ + u32 reg; + + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); + + reg = ENETSW_MDIOC_WR_MASK | + (phy_id << 
ENETSW_MDIOC_PHYID_SHIFT) | + (location << ENETSW_MDIOC_REG_SHIFT); + + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; + + reg |= data; + + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + spin_unlock_bh(&priv->enetsw_mdio_lock); +} + +static inline int bcm_enet_port_is_rgmii(int portid) +{ + return portid >= ENETSW_RGMII_PORT0; +} + +/* + * enet sw PHY polling + */ +static void swphy_poll_timer(unsigned long data) +{ + struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data; + unsigned int i; + + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + int val, j, up, advertise, lpa, lpa2, speed, duplex, media; + int external_phy = bcm_enet_port_is_rgmii(i); + u8 override; + + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (port->bypass_link) + continue; + + /* dummy read to clear */ + for (j = 0; j < 2; j++) + val = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_BMSR); + + if (val == 0xffff) + continue; + + up = (val & BMSR_LSTATUS) ? 1 : 0; + if (!(up ^ priv->sw_port_link[i])) + continue; + + priv->sw_port_link[i] = up; + + /* link changed */ + if (!up) { + dev_info(&priv->pdev->dev, "link DOWN on %s\n", + port->name); + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + continue; + } + + advertise = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_ADVERTISE); + + lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_LPA); + + lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_STAT1000); + + /* figure out media and duplex from advertise and LPA values */ + media = mii_nway_result(lpa & advertise); + duplex = (media & ADVERTISE_FULL) ? 1 : 0; + if (lpa2 & LPA_1000FULL) + duplex = 1; + + if (lpa2 & (LPA_1000FULL | LPA_1000HALF)) + speed = 1000; + else { + if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) + speed = 100; + else + speed = 10; + } + + dev_info(&priv->pdev->dev, + "link UP on %s, %dMbps, %s-duplex\n", + port->name, speed, duplex ? 
"full" : "half"); + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + if (speed == 1000) + override |= ENETSW_IMPOV_1000_MASK; + else if (speed == 100) + override |= ENETSW_IMPOV_100_MASK; + if (duplex) + override |= ENETSW_IMPOV_FDX_MASK; + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + priv->swphy_poll.expires = jiffies + HZ; + add_timer(&priv->swphy_poll); +} + +/* + * open callback, allocate dma rings & buffers and start rx operation + */ +static int bcm_enetsw_open(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i, ret; + unsigned int size; + void *p; + u32 val; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + /* mask all interrupts and request them */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); + if (ret) + goto out_freeirq; + + if (priv->irq_tx != -1) { + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); + if (ret) + goto out_freeirq_rx; + } + + /* allocate rx dma ring */ + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate rx ring %u\n", size); + ret = -ENOMEM; + goto out_freeirq_tx; + } + + memset(p, 0, size); + priv->rx_desc_alloc_size = size; + priv->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate tx ring\n"); + ret = -ENOMEM; + goto out_free_rx_ring; + } + + memset(p, 0, size); + priv->tx_desc_alloc_size = size; + priv->tx_desc_cpu = p; + + priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, + GFP_KERNEL); + if (!priv->tx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_ring; + } + + priv->tx_desc_count = priv->tx_ring_size; + priv->tx_dirty_desc = 0; + priv->tx_curr_desc = 0; + spin_lock_init(&priv->tx_lock); + + /* init & fill rx ring with skbs */ + priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, + GFP_KERNEL); + if (!priv->rx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_skb; + } + + priv->rx_desc_count = 0; + priv->rx_dirty_desc = 0; + priv->rx_curr_desc = 0; + + /* disable all ports */ + for (i = 0; i < priv->num_ports; i++) { + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + + priv->sw_port_link[i] = 0; + } + + /* reset mib */ + val = enetsw_readb(priv, ENETSW_GMCR_REG); + val |= ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + val &= ~ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + + /* force CPU port state */ + val = enetsw_readb(priv, ENETSW_IMPOV_REG); + val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; + enetsw_writeb(priv, val, ENETSW_IMPOV_REG); + + /* enable switch forward engine */ + val = enetsw_readb(priv, ENETSW_SWMODE_REG); + val |= ENETSW_SWMODE_FWD_EN_MASK; + enetsw_writeb(priv, val, ENETSW_SWMODE_REG); + + /* enable jumbo on all ports */ + enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG); + 
enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG); + + /* initialize flow control buffer allocation */ + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + + if (bcm_enet_refill_rx(dev)) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG, priv->rx_chan); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG, priv->tx_chan); + + /* clear remaining state ram for rx & tx channel */ + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); + + /* set dma maximum burst len */ + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->tx_chan); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + + /* all set, enable mac and interrupts, start dma engine and + * kick rx dma channel + */ + wmb(); + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG, priv->rx_chan); + + /* watch "packet transferred" interrupt in rx and tx */ + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR, priv->tx_chan); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&priv->napi); + + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK, priv->tx_chan); + + netif_carrier_on(dev); + netif_start_queue(dev); + + /* apply override config for bypass_link ports here. 
*/ + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + u8 override; + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (!port->bypass_link) + continue; + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + switch (port->force_speed) { + case 1000: + override |= ENETSW_IMPOV_1000_MASK; + break; + case 100: + override |= ENETSW_IMPOV_100_MASK; + break; + case 10: + break; + default: + pr_warn("invalid forced speed on port %s: assume 10\n", + port->name); + break; + } + + if (port->force_duplex_full) + override |= ENETSW_IMPOV_FDX_MASK; + + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + /* start phy polling timer */ + init_timer(&priv->swphy_poll); + priv->swphy_poll.function = swphy_poll_timer; + priv->swphy_poll.data = (unsigned long)priv; + priv->swphy_poll.expires = jiffies; + add_timer(&priv->swphy_poll); + return 0; + +out: + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + kfree(priv->rx_skb); + +out_free_tx_skb: + kfree(priv->tx_skb); + +out_free_tx_ring: + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + +out_free_rx_ring: + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + +out_freeirq_tx: + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + +out_freeirq_rx: + free_irq(priv->irq_rx, dev); + +out_freeirq: + return ret; +} + +/* stop callback */ +static int bcm_enetsw_stop(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + del_timer_sync(&priv->swphy_poll); + netif_stop_queue(dev); + napi_disable(&priv->napi); + del_timer_sync(&priv->rx_timeout); + + /* mask all interrupts */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + /* disable dma & mac */ + bcm_enet_disable_dma(priv, priv->tx_chan); + bcm_enet_disable_dma(priv, priv->rx_chan); + + /* force reclaim of all tx buffers */ + bcm_enet_tx_reclaim(dev, 1); + + /* free the rx skb ring */ + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(priv->rx_skb); + kfree(priv->tx_skb); + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + free_irq(priv->irq_rx, dev); + + return 0; +} + +/* try to sort out phy external status by walking the used_port field + * in the bcm_enet_priv structure. in case the phy address is not + * assigned to any physical port on the switch, assume it is external + * (and yell at the user). 
+ */ +static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id) +{ + int i; + + for (i = 0; i < priv->num_ports; ++i) { + if (!priv->used_ports[i].used) + continue; + if (priv->used_ports[i].phy_id == phy_id) + return bcm_enet_port_is_rgmii(i); + } + + printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n", + phy_id); + return 1; +} + +/* can't use bcmenet_sw_mdio_read directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, + int location) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + return bcmenet_sw_mdio_read(priv, + bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location); +} + +/* can't use bcmenet_sw_mdio_write directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, + int location, + int val) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location, val); +} + +static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct mii_if_info mii; + + mii.dev = dev; + mii.mdio_read = bcm_enetsw_mii_mdio_read; + mii.mdio_write = bcm_enetsw_mii_mdio_write; + mii.phy_id = 0; + mii.phy_id_mask = 0x3f; + mii.reg_num_mask = 0x1f; + return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); + +} + +static const struct net_device_ops bcm_enetsw_ops = { + .ndo_open = bcm_enetsw_open, + .ndo_stop = bcm_enetsw_stop, + .ndo_start_xmit = bcm_enet_start_xmit, + .ndo_change_mtu = bcm_enet_change_mtu, + .ndo_do_ioctl = bcm_enetsw_ioctl, +}; + + +static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { + { "rx_packets", DEV_STAT(rx_packets), -1 }, + { "tx_packets", DEV_STAT(tx_packets), -1 }, + { "rx_bytes", DEV_STAT(rx_bytes), -1 }, + { "tx_bytes", DEV_STAT(tx_bytes), -1 }, + { "rx_errors", DEV_STAT(rx_errors), -1 }, + { "tx_errors", DEV_STAT(tx_errors), -1 }, + { "rx_dropped", DEV_STAT(rx_dropped), -1 }, + { "tx_dropped", DEV_STAT(tx_dropped), -1 }, + + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT }, + { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST }, + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST }, + { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT }, + { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 }, + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 }, + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 }, + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 }, + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023}, + { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max), + ETHSW_MIB_RX_1024_1522 }, + { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047), + ETHSW_MIB_RX_1523_2047 }, + { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095), + ETHSW_MIB_RX_2048_4095 }, + { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191), + ETHSW_MIB_RX_4096_8191 }, + { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728), + ETHSW_MIB_RX_8192_9728 }, + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, + { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, + { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, + { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, + { "tx_pause", GEN_STAT(mib.tx_pause), 
ETHSW_MIB_RX_PAUSE }, + + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, + { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, + { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, + { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, + { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, + { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, + +}; + +#define BCM_ENETSW_STATS_LEN \ + (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) + +static void bcm_enetsw_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcm_enetsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static int bcm_enetsw_get_sset_count(struct net_device *netdev, + int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCM_ENETSW_STATS_LEN; + default: + return -EINVAL; + } +} + +static void bcm_enetsw_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, bcm_enet_driver_name, 32); + strncpy(drvinfo->version, bcm_enet_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "bcm63xx", 32); + drvinfo->n_stats = BCM_ENETSW_STATS_LEN; +} + +static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcm_enet_priv *priv; + int i; + + priv = netdev_priv(netdev); + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + u32 lo, hi; + char *p; + int reg; + + s = &bcm_enetsw_gstrings_stats[i]; + + reg = s->mib_reg; + if (reg == -1) + continue; + + lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); + p = (char *)priv + s->stat_offset; + + if (s->sizeof_stat == sizeof(u64)) { + hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); + *(u64 *)p = ((u64)hi << 32 | lo); + } else { + *(u32 *)p = lo; + } + } + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + char *p; + + s = &bcm_enetsw_gstrings_stats[i]; + + if (s->mib_reg == -1) + p = (char *)&netdev->stats + s->stat_offset; + else + p = (char *)priv + s->stat_offset; + + data[i] = (s->sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +static void bcm_enetsw_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + /* rx/tx ring is actually only limited by memory */ + ering->rx_max_pending = 8192; + ering->tx_max_pending = 8192; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + ering->rx_pending = priv->rx_ring_size; + ering->tx_pending = priv->tx_ring_size; +} + +static int bcm_enetsw_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + int was_running; + + priv = netdev_priv(dev); + + was_running = 0; + if (netif_running(dev)) { + bcm_enetsw_stop(dev); + was_running = 1; + } + + priv->rx_ring_size = ering->rx_pending; + priv->tx_ring_size = ering->tx_pending; + + if (was_running) { + int err; + + err = bcm_enetsw_open(dev); + if (err) + dev_close(dev); + } + return 0; +} + +static struct ethtool_ops bcm_enetsw_ethtool_ops = { + .get_strings = bcm_enetsw_get_strings, + .get_sset_count = bcm_enetsw_get_sset_count, + .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, + .get_drvinfo = bcm_enetsw_get_drvinfo, + .get_ringparam = bcm_enetsw_get_ringparam, + .set_ringparam = bcm_enetsw_set_ringparam, +}; + +/* allocate netdevice, request register memory and register device. */ +static int bcm_enetsw_probe(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct bcm63xx_enetsw_platform_data *pd; + struct resource *res_mem; + int ret, irq_rx, irq_tx; + + /* stop if shared driver failed, assume driver->probe will be + * called in the same order we register devices (correct ?) + */ + if (!bcm_enet_shared_base[0]) + return -ENODEV; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq_rx = platform_get_irq(pdev, 0); + irq_tx = platform_get_irq(pdev, 1); + if (!res_mem || irq_rx < 0) return -ENODEV; - bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res); - if (!bcm_enet_shared_base) + ret = 0; + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) return -ENOMEM; + priv = netdev_priv(dev); + memset(priv, 0, sizeof(*priv)); + + /* initialize default and fetch platform data */ + priv->enet_is_sw = true; + priv->irq_rx = irq_rx; + priv->irq_tx = irq_tx; + priv->rx_ring_size = BCMENET_DEF_RX_DESC; + priv->tx_ring_size = BCMENET_DEF_TX_DESC; + priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; + + pd = pdev->dev.platform_data; + if (pd) { + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + memcpy(priv->used_ports, pd->used_ports, + sizeof(pd->used_ports)); + priv->num_ports = pd->num_ports; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; + } + + ret = compute_hw_mtu(priv, dev->mtu); + if (ret) + goto out; + + if (!request_mem_region(res_mem->start, resource_size(res_mem), + "bcm63xx_enetsw")) { + ret = -EBUSY; + goto out; + } + + priv->base = ioremap(res_mem->start, resource_size(res_mem)); + if (priv->base == NULL) { + ret = -ENOMEM; + goto out_release_mem; + } + + priv->mac_clk = clk_get(&pdev->dev, "enetsw"); + if (IS_ERR(priv->mac_clk)) { + ret = PTR_ERR(priv->mac_clk); + goto out_unmap; + } + clk_enable(priv->mac_clk); + + priv->rx_chan = 0; + priv->tx_chan = 1; + spin_lock_init(&priv->rx_lock); + + /* init rx timeout (used for oom) */ + init_timer(&priv->rx_timeout); + priv->rx_timeout.function = bcm_enet_refill_rx_timer; + priv->rx_timeout.data = 
(unsigned long)dev; + + /* register netdevice */ + dev->netdev_ops = &bcm_enetsw_ops; + netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops); + SET_NETDEV_DEV(dev, &pdev->dev); + + spin_lock_init(&priv->enetsw_mdio_lock); + + ret = register_netdev(dev); + if (ret) + goto out_put_clk; + + netif_carrier_off(dev); + platform_set_drvdata(pdev, dev); + priv->pdev = pdev; + priv->net_dev = dev; + + return 0; + +out_put_clk: + clk_put(priv->mac_clk); + +out_unmap: + iounmap(priv->base); + +out_release_mem: + release_mem_region(res_mem->start, resource_size(res_mem)); +out: + free_netdev(dev); + return ret; +} + + +/* exit func, stops hardware and unregisters netdevice */ +static int bcm_enetsw_remove(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct resource *res; + + /* stop netdevice */ + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); + unregister_netdev(dev); + + /* release device resources */ + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; +} + +struct platform_driver bcm63xx_enetsw_driver = { + .probe = bcm_enetsw_probe, + .remove = bcm_enetsw_remove, + .driver = { + .name = "bcm63xx_enetsw", + .owner = THIS_MODULE, + }, +}; + +/* reserve & remap memory space shared between all macs */ +static int bcm_enet_shared_probe(struct platform_device *pdev) +{ + struct resource *res; + void __iomem *p[3]; + unsigned int i; + + memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); + + for (i = 0; i < 3; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + p[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(p[i])) + return PTR_ERR(p[i]); + } + + memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); return 0; } @@ -1884,8 +2876,7 @@ static int bcm_enet_shared_remove(struct platform_device *pdev) return 0; } -/* - * this "shared" driver is needed because both macs share a single +/* this "shared" driver is needed because both macs share a single * address space */ struct platform_driver bcm63xx_enet_shared_driver = { @@ -1897,9 +2888,7 @@ struct platform_driver bcm63xx_enet_shared_driver = { }, }; -/* - * entry point - */ +/* entry point */ static int __init bcm_enet_init(void) { int ret; @@ -1912,12 +2901,19 @@ static int __init bcm_enet_init(void) if (ret) platform_driver_unregister(&bcm63xx_enet_shared_driver); + ret = platform_driver_register(&bcm63xx_enetsw_driver); + if (ret) { + platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enet_shared_driver); + } + return ret; } static void __exit bcm_enet_exit(void) { platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enetsw_driver); platform_driver_unregister(&bcm63xx_enet_shared_driver); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h index 133d5857b9e2..f55af4310085 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -18,6 +18,7 @@ /* maximum burst len for dma (4 bytes unit) */ #define BCMENET_DMA_MAXBURST 16 +#define BCMENETSW_DMA_MAXBURST 8 /* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value * must be low enough so that a DMA transfer of above burst length can @@ -84,11 +85,60 @@ #define ETH_MIB_RX_CNTRL 54 +/* + * SW MIB Counters register 
definitions +*/ +#define ETHSW_MIB_TX_ALL_OCT 0 +#define ETHSW_MIB_TX_DROP_PKTS 2 +#define ETHSW_MIB_TX_QOS_PKTS 3 +#define ETHSW_MIB_TX_BRDCAST 4 +#define ETHSW_MIB_TX_MULT 5 +#define ETHSW_MIB_TX_UNI 6 +#define ETHSW_MIB_TX_COL 7 +#define ETHSW_MIB_TX_1_COL 8 +#define ETHSW_MIB_TX_M_COL 9 +#define ETHSW_MIB_TX_DEF 10 +#define ETHSW_MIB_TX_LATE 11 +#define ETHSW_MIB_TX_EX_COL 12 +#define ETHSW_MIB_TX_PAUSE 14 +#define ETHSW_MIB_TX_QOS_OCT 15 + +#define ETHSW_MIB_RX_ALL_OCT 17 +#define ETHSW_MIB_RX_UND 19 +#define ETHSW_MIB_RX_PAUSE 20 +#define ETHSW_MIB_RX_64 21 +#define ETHSW_MIB_RX_65_127 22 +#define ETHSW_MIB_RX_128_255 23 +#define ETHSW_MIB_RX_256_511 24 +#define ETHSW_MIB_RX_512_1023 25 +#define ETHSW_MIB_RX_1024_1522 26 +#define ETHSW_MIB_RX_OVR 27 +#define ETHSW_MIB_RX_JAB 28 +#define ETHSW_MIB_RX_ALIGN 29 +#define ETHSW_MIB_RX_CRC 30 +#define ETHSW_MIB_RX_GD_OCT 31 +#define ETHSW_MIB_RX_DROP 33 +#define ETHSW_MIB_RX_UNI 34 +#define ETHSW_MIB_RX_MULT 35 +#define ETHSW_MIB_RX_BRDCAST 36 +#define ETHSW_MIB_RX_SA_CHANGE 37 +#define ETHSW_MIB_RX_FRAG 38 +#define ETHSW_MIB_RX_OVR_DISC 39 +#define ETHSW_MIB_RX_SYM 40 +#define ETHSW_MIB_RX_QOS_PKTS 41 +#define ETHSW_MIB_RX_QOS_OCT 42 +#define ETHSW_MIB_RX_1523_2047 44 +#define ETHSW_MIB_RX_2048_4095 45 +#define ETHSW_MIB_RX_4096_8191 46 +#define ETHSW_MIB_RX_8192_9728 47 + + struct bcm_enet_mib_counters { u64 tx_gd_octets; u32 tx_gd_pkts; u32 tx_all_octets; u32 tx_all_pkts; + u32 tx_unicast; u32 tx_brdcast; u32 tx_mult; u32 tx_64; @@ -97,7 +147,12 @@ struct bcm_enet_mib_counters { u32 tx_256_511; u32 tx_512_1023; u32 tx_1024_max; + u32 tx_1523_2047; + u32 tx_2048_4095; + u32 tx_4096_8191; + u32 tx_8192_9728; u32 tx_jab; + u32 tx_drop; u32 tx_ovr; u32 tx_frag; u32 tx_underrun; @@ -114,6 +169,7 @@ struct bcm_enet_mib_counters { u32 rx_all_octets; u32 rx_all_pkts; u32 rx_brdcast; + u32 rx_unicast; u32 rx_mult; u32 rx_64; u32 rx_65_127; @@ -197,6 +253,9 @@ struct bcm_enet_priv { /* number of dma desc in tx ring */ int tx_ring_size; + /* maximum dma burst size */ + int dma_maxburst; + /* cpu view of rx dma ring */ struct bcm_enet_desc *tx_desc_cpu; @@ -269,6 +328,33 @@ struct bcm_enet_priv { /* maximum hardware transmit/receive size */ unsigned int hw_mtu; + + bool enet_is_sw; + + /* port mapping for switch devices */ + int num_ports; + struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; + int sw_port_link[ENETSW_MAX_PORT]; + + /* used to poll switch port state */ + struct timer_list swphy_poll; + spinlock_t enetsw_mdio_lock; + + /* dma channel enable mask */ + u32 dma_chan_en_mask; + + /* dma channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; + + /* dma channel width */ + unsigned int dma_chan_width; + + /* dma descriptor shift value */ + unsigned int dma_desc_shift; }; + #endif /* ! 
BCM63XX_ENET_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 5d204492c603..6a2de1d79ff6 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8104,7 +8104,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) pci_set_master(pdev); - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + bp->pm_cap = pdev->pm_cap; if (bp->pm_cap == 0) { dev_err(&pdev->dev, "Cannot find power management capability, aborting\n"); @@ -8764,18 +8764,4 @@ static struct pci_driver bnx2_pci_driver = { .err_handler = &bnx2_err_handler, }; -static int __init bnx2_init(void) -{ - return pci_register_driver(&bnx2_pci_driver); -} - -static void __exit bnx2_cleanup(void) -{ - pci_unregister_driver(&bnx2_pci_driver); -} - -module_init(bnx2_init); -module_exit(bnx2_cleanup); - - - +module_pci_driver(bnx2_pci_driver); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 3dba2a70a00e..dedbd76c033e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -34,12 +34,10 @@ #define BCM_DCBNL #endif - #include "bnx2x_hsi.h" #include "../cnic_if.h" - #define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt) #include <linux/mdio.h> @@ -114,7 +112,6 @@ do { \ #define BNX2X_ERROR(fmt, ...) \ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) - /* before we have a dev->name use dev_info() */ #define BNX2X_DEV_INFO(fmt, ...) \ do { \ @@ -147,7 +144,6 @@ do { \ #define U64_HI(x) ((u32)(((u64)(x)) >> 32)) #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) - #define REG_ADDR(bp, offset) ((bp->regview) + (offset)) #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) @@ -366,7 +362,7 @@ union db_prod { /* * Number of required SGEs is the sum of two: * 1. Number of possible opened aggregations (next packet for - * these aggregations will probably consume SGE immidiatelly) + * these aggregations will probably consume SGE immediately) * 2. 
Rest of BRB blocks divided by 2 (block will consume new SGE only * after placement on BD for new TPA aggregation) * @@ -387,7 +383,6 @@ union db_prod { #define BIT_VEC64_ELEM_SHIFT 6 #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) - #define __BIT_VEC64_SET_BIT(el, bit) \ do { \ el = ((el) | ((u64)0x1 << (bit))); \ @@ -398,7 +393,6 @@ union db_prod { el = ((el) & (~((u64)0x1 << (bit)))); \ } while (0) - #define BIT_VEC64_SET_BIT(vec64, idx) \ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ (idx) & BIT_VEC64_ELEM_MASK) @@ -419,8 +413,6 @@ union db_prod { /*******************************************************/ - - /* Number of u64 elements in SGE mask array */ #define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) @@ -493,11 +485,26 @@ struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ struct napi_struct napi; + +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int state; +#define BNX2X_FP_STATE_IDLE 0 +#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ +#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ +#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) +#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) + /* protect state */ + spinlock_t lock; +#endif /* CONFIG_NET_LL_RX_POLL */ + union host_hc_status_block status_blk; - /* chip independed shortcuts into sb structure */ + /* chip independent shortcuts into sb structure */ __le16 *sb_index_values; __le16 *sb_running_index; - /* chip independed shortcut into rx_prods_offset memory */ + /* chip independent shortcut into rx_prods_offset memory */ u32 ustorm_rx_prods_offset; u32 rx_buf_size; @@ -565,6 +572,116 @@ struct bnx2x_fastpath { #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) +#ifdef CONFIG_NET_LL_RX_POLL +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ + spin_lock_init(&fp->lock); + fp->state = BNX2X_FP_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a FP */ +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock(&fp->lock); + if (fp->state & BNX2X_FP_LOCKED) { + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + fp->state |= BNX2X_FP_STATE_NAPI_YIELD; + rc = false; + } else { + /* we don't care if someone yielded */ + fp->state = BNX2X_FP_STATE_NAPI; + } + spin_unlock(&fp->lock); + return rc; +} + +/* returns true is someone tried to get the FP while napi had it */ +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = false; + + spin_lock(&fp->lock); + WARN_ON(fp->state & + (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); + + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + fp->state = BNX2X_FP_STATE_IDLE; + spin_unlock(&fp->lock); + return rc; +} + +/* called from bnx2x_low_latency_poll() */ +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock_bh(&fp->lock); + if ((fp->state & BNX2X_FP_LOCKED)) { + fp->state |= BNX2X_FP_STATE_POLL_YIELD; + rc = false; + } else { + /* preserve yield marks */ + fp->state |= BNX2X_FP_STATE_POLL; + } + spin_unlock_bh(&fp->lock); + return rc; +} + +/* returns true if someone tried to get 
the FP while it was locked */ +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = false; + + spin_lock_bh(&fp->lock); + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + fp->state = BNX2X_FP_STATE_IDLE; + spin_unlock_bh(&fp->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); + return fp->state & BNX2X_FP_USER_PEND; +} +#else +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ +} + +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + return true; +} + +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + /* Use 2500 as a mini-jumbo MTU for FCoE */ #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 @@ -580,12 +697,10 @@ struct bnx2x_fastpath { txdata_ptr[FIRST_TX_COS_INDEX] \ ->var) - #define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp)) #define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp)) #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) - /* MC hsi */ #define MAX_FETCH_BD 13 /* HW max BDs per packet */ #define RX_COPY_THRESH 92 @@ -613,7 +728,7 @@ struct bnx2x_fastpath { * START_BD(splitted) - includes unpaged data segment for GSO * PARSING_BD - for TSO and CSUM data * PARSING_BD2 - for encapsulation data - * Frag BDs - decribes pages for frags + * Frag BDs - describes pages for frags */ #define BDS_PER_TX_PKT 4 #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) @@ -693,12 +808,10 @@ struct bnx2x_fastpath { FW_DROP_LEVEL(bp)) #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) - /* This is needed for determining of last_max */ #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) - #define BNX2X_SWCID_SHIFT 17 #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) @@ -723,7 +836,6 @@ struct bnx2x_fastpath { DPM_TRIGER_TYPE); \ } while (0) - /* TX CSUM helpers */ #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ skb->csum_offset) @@ -766,7 +878,6 @@ struct bnx2x_fastpath { #define BNX2X_RX_SUM_FIX(cqe) \ BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) - #define FP_USB_FUNC_OFF \ offsetof(struct cstorm_status_block_u, func) #define FP_CSB_FUNC_OFF \ @@ -900,14 +1011,14 @@ struct bnx2x_common { #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ (CHIP_REV(bp) == CHIP_REV_Ax)) /* This define is used in two main places: - * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher + * 1. In the early stages of nic_load, to know if to configure Parser / Searcher * to nic-only mode or to offload mode. Offload mode is configured if either the * chip is E1x (where MIC_MODE register is not applicable), or if cnic already * registered for this port (which means that the user wants storage services). * 2. During cnic-related load, to know if offload mode is already configured in - * the HW or needs to be configrued. + * the HW or needs to be configured. 
* Since the transition from nic-mode to offload-mode in HW causes traffic - * coruption, nic-mode is configured only in ports on which storage services + * corruption, nic-mode is configured only in ports on which storage services * where never requested. */ #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) @@ -1008,14 +1119,14 @@ extern struct workqueue_struct *bnx2x_wq; * If the maximum number of FP-SB available is X then: * a. If CNIC is supported it consumes 1 FP-SB thus the max number of * regular L2 queues is Y=X-1 - * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor) * c. If the FCoE L2 queue is supported the actual number of L2 queues * is Y+1 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for * slow-path interrupts) or Y+2 if CNIC is supported (one additional * FP interrupt context for the CNIC). * e. The number of HW context (CID count) is always X or X+1 if FCoE - * L2 queue is supported. the cid for the FCoE L2 queue is always X. + * L2 queue is supported. The cid for the FCoE L2 queue is always X. */ /* fast-path interrupt contexts E1x */ @@ -1068,7 +1179,6 @@ struct bnx2x_slowpath { struct eth_classify_rules_ramrod_data e2; } mac_rdata; - union { struct tstorm_eth_mac_filter_config e1x; struct eth_filter_rules_ramrod_data e2; @@ -1119,7 +1229,6 @@ struct bnx2x_slowpath { #define bnx2x_sp_mapping(bp, var) \ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) - /* attn group wiring */ #define MAX_DYNAMIC_ATTN_GRPS 8 @@ -1221,11 +1330,11 @@ enum { BNX2X_SP_RTNL_AFEX_F_UPDATE, BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_VFPF_MCAST, + BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, }; - struct bnx2x_prev_path_list { struct list_head list; u8 bus; @@ -1392,6 +1501,7 @@ struct bnx2x { #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) +#define INTERRUPTS_ENABLED_FLAG (1 << 23) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1585,7 +1695,7 @@ struct bnx2x { struct mutex cnic_mutex; struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; - /* Start index of the "special" (CNIC related) L2 cleints */ + /* Start index of the "special" (CNIC related) L2 clients */ u8 cnic_base_cl_id; int dmae_ready; @@ -1699,7 +1809,7 @@ struct bnx2x { /* operation indication for the sp_rtnl task */ unsigned long sp_rtnl_state; - /* DCBX Negotation results */ + /* DCBX Negotiation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; @@ -1755,7 +1865,6 @@ extern int num_queues; #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ - struct bnx2x_func_init_params { /* dma */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ @@ -1853,9 +1962,6 @@ struct bnx2x_func_init_params { #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) - - - /** * bnx2x_set_mac_one - configure a single MAC address * @@ -1921,7 +2027,6 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, u8 src_type, u8 dst_type); int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); -void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl); /* FLR related routines */ u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); @@ -1937,6 +2042,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, void bnx2x_update_coalesce(struct 
bnx2x *bp); int bnx2x_get_cur_phy_idx(struct bnx2x *bp); +bool bnx2x_port_after_undi(struct bnx2x *bp); + static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, int wait) { @@ -1998,7 +2105,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define UNLOAD_CLOSE 1 #define UNLOAD_RECOVERY 2 - /* DMAE command defines */ #define DMAE_TIMEOUT -1 #define DMAE_PCI_ERROR -2 /* E2 and onward */ @@ -2062,7 +2168,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit - indicates eror */ + * indicates error + */ #define MAX_DMAE_C_PER_PORT 8 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ @@ -2100,7 +2207,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) - #define BNX2X_BTR 4 #define MAX_SPQ_PENDING 8 @@ -2137,6 +2243,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define ATTN_HARD_WIRED_MASK 0xff00 #define ATTENTION_ID 4 +#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \ + IS_MF_FCOE_AFEX(bp)) /* stuff added to make the code fit 80Col */ @@ -2338,4 +2446,9 @@ enum { #define NUM_MACS 8 +enum bnx2x_pci_bus_speed { + BNX2X_PCI_LINK_SPEED_2500 = 2500, + BNX2X_PCI_LINK_SPEED_5000 = 5000, + BNX2X_PCI_LINK_SPEED_8000 = 8000 +}; #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 638e55435b04..ee350bde1818 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -24,6 +24,7 @@ #include <net/tcp.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> +#include <net/busy_poll.h> #include <linux/prefetch.h> #include "bnx2x_cmn.h" #include "bnx2x_init.h" @@ -124,7 +125,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); /* Queue pointer cannot be re-set on an fp-basis, as moving pointer - * backward along the array could cause memory to be overriden + * backward along the array could cause memory to be overridden */ for (cos = 1; cos < bp->max_cos; cos++) { for (i = 0; i < old_eth_num - delta; i++) { @@ -165,7 +166,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); - nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { @@ -259,7 +259,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) smp_mb(); if (unlikely(netif_tx_queue_stopped(txq))) { - /* Taking tx_lock() is needed to prevent reenabling the queue + /* Taking tx_lock() is needed to prevent re-enabling the queue * while it's empty. 
This could have happen if rx_action() gets * suspended in bnx2x_tx_int() after the condition before * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): @@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return err; } - /* Unmap the page as we r going to pass it to the stack */ + /* Unmap the page as we're going to pass it to the stack */ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(&old_rx_pg, mapping), SGE_PAGES, DMA_FROM_DEVICE); @@ -733,7 +733,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, dev_kfree_skb_any(skb); } - /* put new data in bin */ rx_buf->data = new_data; @@ -805,40 +804,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { struct bnx2x *bp = fp->bp; u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; - u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; + u16 sw_comp_cons, sw_comp_prod; int rx_pkt = 0; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) return 0; #endif - /* CQ "next element" is of the size of the regular element, - that's why it's ok here */ - hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); - if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - hw_comp_cons++; - bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_comp_cons = fp->rx_comp_cons; sw_comp_prod = fp->rx_comp_prod; - /* Memory barrier necessary as speculative reads of the rx - * buffer can be ahead of the index in the status block - */ - rmb(); + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; DP(NETIF_MSG_RX_STATUS, - "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", - fp->index, hw_comp_cons, sw_comp_cons); + "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); - while (sw_comp_cons != hw_comp_cons) { + while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) { struct sw_rx_bd *rx_buf = NULL; struct sk_buff *skb; - union eth_rx_cqe *cqe; - struct eth_fast_path_rx_cqe *cqe_fp; u8 cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; u16 len, pad, queue; @@ -850,12 +841,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) return 0; #endif - comp_ring_cons = RCQ_BD(sw_comp_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); - cqe = &fp->rx_comp_ring[comp_ring_cons]; - cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; @@ -899,7 +887,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) cqe_fp); goto next_rx; - } queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->tpa_info[queue]; @@ -1002,9 +989,13 @@ reuse_rx: PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); - napi_gro_receive(&fp->napi, skb); + skb_mark_napi_id(skb, &fp->napi); + if (bnx2x_fp_ll_polling(fp)) + netif_receive_skb(skb); + else + napi_gro_receive(&fp->napi, skb); next_rx: rx_buf->data = NULL; @@ -1016,8 +1007,15 @@ next_cqe: sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + /* mark CQE as free */ + BNX2X_SEED_CQE(cqe_fp); + if (rx_pkt == budget) break; + + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; } /* while */ fp->rx_bd_cons = bd_cons; @@ -1053,8 +1051,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) #endif /* Handle Rx and Tx according to MSI-X vector */ - prefetch(fp->rx_cons_sb); - for_each_cos_in_tx_queue(fp, cos) 
prefetch(fp->txdata_ptr[cos]->tx_cons_sb); @@ -1118,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp, memset(data, 0, sizeof(*data)); - /* Fill the report data: efective line speed */ + /* Fill the report data: effective line speed */ data->line_speed = line_speed; /* Link is down */ @@ -1161,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp) * * @bp: driver handle * - * None atomic inmlementation. + * None atomic implementation. * Should be called under the phy_lock. */ void __bnx2x_link_report(struct bnx2x *bp) @@ -1304,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); if (!fp->disable_tpa) { - /* Fill the per-aggregtion pool */ + /* Fill the per-aggregation pool */ for (i = 0; i < MAX_AGG_QS(bp); i++) { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; @@ -1726,7 +1722,7 @@ static int bnx2x_req_irq(struct bnx2x *bp) return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); } -int bnx2x_setup_irqs(struct bnx2x *bp) +static int bnx2x_setup_irqs(struct bnx2x *bp) { int rc = 0; if (bp->flags & USING_MSIX_FLAG && @@ -1759,32 +1755,46 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); + } } static void bnx2x_napi_enable(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); + } } static void bnx2x_napi_disable_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + local_bh_disable(); + for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_lock_napi(&bp->fp[i])) + mdelay(1); + } + local_bh_enable(); } static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + local_bh_disable(); + for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_lock_napi(&bp->fp[i])) + mdelay(1); + } + local_bh_enable(); } void bnx2x_netif_start(struct bnx2x *bp) @@ -1829,7 +1839,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) } /* select a non-FCoE queue */ - return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); + return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); } void bnx2x_set_num_queues(struct bnx2x *bp) @@ -1862,7 +1872,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) * * If the actual number of Tx queues (for each CoS) is less than 16 then there * will be the holes at the end of each group of 16 ETh L2 indices (0..15, - * 16..31,...) with indicies that are not coupled with any real Tx queue. + * 16..31,...) with indices that are not coupled with any real Tx queue. * * The proper configuration of skb->queue_mapping is handled by * bnx2x_select_queue() and __skb_tx_hash(). 
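bnx2x_select_queue() above now folds the stack's generic queue pick into the ethernet-only range with a modulo, so the FCoE queue that shares the same queue space can never be chosen for ordinary traffic. A small standalone model of that mapping, with made-up queue counts:

    #include <stdio.h>

    #define NUM_ETH_QUEUES 8                    /* regular L2 queues */
    #define FCOE_QUEUE_IDX NUM_ETH_QUEUES       /* FCoE sits right after them */

    /* map an arbitrary flow hash into the non-FCoE range */
    static unsigned int select_eth_queue(unsigned int flow_hash)
    {
        return flow_hash % NUM_ETH_QUEUES;
    }

    int main(void)
    {
        unsigned int hashes[] = { 3, 8, 1234567, 0xdeadbeef };

        for (unsigned int i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
            printf("hash %u -> txq %u\n", hashes[i],
                   select_eth_queue(hashes[i]));
        /* every result is < NUM_ETH_QUEUES, so FCOE_QUEUE_IDX is never hit */
        return 0;
    }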
@@ -1924,7 +1934,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END; - /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ + /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; else @@ -1937,7 +1947,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) int i; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - /* Prepare the initial contents fo the indirection table if RSS is + /* Prepare the initial contents for the indirection table if RSS is * enabled */ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) @@ -2015,7 +2025,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) /* * Cleans the object that have internal lists without sending - * ramrods. Should be run when interrutps are disabled. + * ramrods. Should be run when interrupts are disabled. */ void bnx2x_squeeze_objects(struct bnx2x *bp) { @@ -2166,10 +2176,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) bp->fw_stats_data_mapping = bp->fw_stats_mapping + bp->fw_stats_req_sz; - DP(BNX2X_MSG_SP, "statistics request base address set to %x %x", + DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n", U64_HI(bp->fw_stats_req_mapping), U64_LO(bp->fw_stats_req_mapping)); - DP(BNX2X_MSG_SP, "statistics data base address set to %x %x", + DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n", U64_HI(bp->fw_stats_data_mapping), U64_LO(bp->fw_stats_data_mapping)); return 0; @@ -2183,6 +2193,8 @@ alloc_mem_err: /* send load request to mcp and analyze response */ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) { + u32 param; + /* init fw_seq */ bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & @@ -2195,9 +2207,13 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) DRV_PULSE_SEQ_MASK); BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; + + if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) + param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; + /* load request */ - (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, - DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); /* if mcp fails to respond we must abort */ if (!(*load_code)) { @@ -2238,7 +2254,7 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code) /* abort nic load if version mismatch */ if (my_fw != loaded_fw) { - BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n", + BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", loaded_fw, my_fw); return -EBUSY; } @@ -2316,10 +2332,10 @@ static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) static void bnx2x_bz_fp(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; - int cos; struct napi_struct orig_napi = fp->napi; struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; + /* bzero bnx2x_fastpath contents */ if (fp->tpa_info) memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * @@ -2345,8 +2361,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * BNX2X_NUM_ETH_QUEUES(bp) + index]; - /* - * set the tpa flag for each queue. The tpa flag determines the queue + /* set the tpa flag for each queue. 
The tpa flag determines the queue * minimal size so it must be set prior to queue memory allocation */ fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || @@ -2429,7 +2444,6 @@ int bnx2x_load_cnic(struct bnx2x *bp) if (bp->state == BNX2X_STATE_OPEN) bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); - DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n"); return 0; @@ -2472,6 +2486,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; + /* zero the structure w/o any lock, before SP handler is initialized */ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, &bp->last_reported_link.link_report_flags); @@ -2536,8 +2551,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } /* configure multi cos mappings in kernel. - * this configuration may be overriden by a multi class queue discipline - * or by a dcbx negotiation result. + * this configuration may be overridden by a multi class queue + * discipline or by a dcbx negotiation result. */ bnx2x_setup_tc(bp->dev, bp->max_cos); @@ -2696,7 +2711,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start the Tx */ switch (load_mode) { case LOAD_NORMAL: - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); break; @@ -2841,7 +2856,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) } /* Nothing to do during unload if previous bnx2x_nic_load() - * have not completed succesfully - all resourses are released. + * have not completed successfully - all resources are released. * * we can get here only after unsuccessful ndo_* callback, during which * dev->IFF_UP flag is still on. @@ -2856,6 +2871,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; smp_mb(); + /* indicate to VFs that the PF is going down */ + bnx2x_iov_channel_down(bp); + if (CNIC_LOADED(bp)) bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); @@ -2890,10 +2908,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); - /* - * Prevent transactions to host from the functions on the + /* Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global - * attention once gloabl blocks are reset and gates are opened + * attention once global blocks are reset and gates are opened * (the engine which leader will perform the recovery * last). */ @@ -2914,7 +2931,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) } /* - * At this stage no more interrupts will arrive so we may safly clean + * At this stage no more interrupts will arrive so we may safely clean * the queueable objects here in case they failed to get cleaned so far. */ if (IS_PF(bp)) @@ -2955,7 +2972,6 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_set_reset_global(bp); } - /* The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. 
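bnx2x_nic_load_request() above now composes the LOAD_REQ mailbox parameter from flag bits rather than passing a fixed constant, OR-ing in FORCE_LFA when an SD multi-function port comes up after UNDI. A sketch of the same composition, with plain ints standing in for the IS_MF_SD() and bnx2x_port_after_undi() predicates (the constants are the ones visible in this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA   0x0000100a
    #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA  0x00002000

    static uint32_t build_load_req_param(int mf_sd, int after_undi)
    {
        uint32_t param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;

        if (mf_sd && after_undi)
            param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
        return param;
    }

    int main(void)
    {
        /* prints param = 0x0000300a when both predicates hold */
        printf("param = 0x%08x\n",
               (unsigned int)build_load_req_param(1, 1));
        return 0;
    }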
*/ @@ -3040,6 +3056,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return 0; } #endif + if (!bnx2x_fp_lock_napi(fp)) + return work_done; for_each_cos_in_tx_queue(fp, cos) if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) @@ -3049,12 +3067,15 @@ int bnx2x_poll(struct napi_struct *napi, int budget) work_done += bnx2x_rx_int(fp, budget - work_done); /* must not complete if we consumed full budget */ - if (work_done >= budget) + if (work_done >= budget) { + bnx2x_fp_unlock_napi(fp); break; + } } /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + if (!bnx2x_fp_unlock_napi(fp) && + !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { /* No need to update SB for FCoE L2 ring as long as * it's connected to the default SB and the SB @@ -3096,6 +3117,32 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return work_done; } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +int bnx2x_low_latency_recv(struct napi_struct *napi) +{ + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + int found = 0; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR) || + (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) + return LL_FLUSH_FAILED; + + if (!bnx2x_fp_lock_poll(fp)) + return LL_FLUSH_BUSY; + + if (bnx2x_has_rx_work(fp)) + found = bnx2x_rx_int(fp, 4); + + bnx2x_fp_unlock_poll(fp); + + return found; +} +#endif + /* we split the first BD into headers and data BDs * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs @@ -3496,9 +3543,12 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, /* outer IP header info */ if (xmit_type & XMIT_CSUM_V4) { struct iphdr *iph = ip_hdr(skb); + u32 csum = (__force u32)(~iph->check) - + (__force u32)iph->tot_len - + (__force u32)iph->frag_off; + pbd2->fw_ip_csum_wo_len_flags_frag = - bswab16(csum_fold((~iph->check) - - iph->tot_len - iph->frag_off)); + bswab16(csum_fold((__force __wsum)csum)); } else { pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); @@ -3586,7 +3636,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", txq_index, fp_index, txdata_index); */ - /* enable this debug print to view the tranmission details + /* enable this debug print to view the transmission details DP(NETIF_MSG_TX_QUEUED, "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", txdata->cid, fp_index, txdata_index, txdata, fp); */ @@ -3968,7 +4018,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - /* no traffic classes requested. aborting */ + /* no traffic classes requested. Aborting */ if (!num_tc) { netdev_reset_tc(dev); return 0; @@ -3976,7 +4026,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* requested to support too many traffic classes */ if (num_tc > bp->max_cos) { - BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n", + BNX2X_ERR("support for too many traffic classes requested: %d. 
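bnx2x_poll() and the new bnx2x_low_latency_recv() above compete for the same rx ring: whichever path takes the fastpath lock runs bnx2x_rx_int(), and the busy-poll loser returns LL_FLUSH_BUSY. A deliberately simplified standalone model of that arbitration using C11 atomics; the real bnx2x_fp_lock_napi()/bnx2x_fp_lock_poll() keep additional yield state under a spinlock, which is omitted here:

    #include <stdatomic.h>
    #include <stdio.h>

    enum fp_owner { FP_FREE = 0, FP_NAPI = 1, FP_POLL = 2 };

    struct fastpath {
        atomic_int owner;
    };

    static int fp_lock_napi(struct fastpath *fp)
    {
        int expected = FP_FREE;

        return atomic_compare_exchange_strong(&fp->owner, &expected, FP_NAPI);
    }

    static int fp_lock_poll(struct fastpath *fp)
    {
        int expected = FP_FREE;

        return atomic_compare_exchange_strong(&fp->owner, &expected, FP_POLL);
    }

    static void fp_unlock(struct fastpath *fp)
    {
        atomic_store(&fp->owner, FP_FREE);
    }

    int main(void)
    {
        struct fastpath fp = { .owner = FP_FREE };

        if (fp_lock_napi(&fp))
            printf("napi owns the ring\n");
        if (!fp_lock_poll(&fp))
            printf("busy poll backs off (LL_FLUSH_BUSY)\n");
        fp_unlock(&fp);
        return 0;
    }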
Max supported is %d\n", num_tc, bp->max_cos); return -EINVAL; } @@ -3995,8 +4045,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) prio, bp->prio_to_cos[prio]); } - - /* Use this configuration to diffrentiate tc0 from other COSes + /* Use this configuration to differentiate tc0 from other COSes This can be used for ets or pfc, and save the effort of setting up a multio class queue disc or negotiating DCBX with a switch netdev_set_prio_tc_map(dev, 0, 0); @@ -4288,10 +4337,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) &bnx2x_fp(bp, index, rx_desc_mapping), sizeof(struct eth_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), - &bnx2x_fp(bp, index, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); + /* Seed all CQEs by 1s */ + BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), + &bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); /* SGE ring */ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), @@ -4472,7 +4522,6 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp) alloc_err: bnx2x_free_mem_bp(bp); return -ENOMEM; - } int bnx2x_reload_if_running(struct net_device *dev) @@ -4514,7 +4563,6 @@ int bnx2x_get_cur_phy_idx(struct bnx2x *bp) } return sel_phy_idx; - } int bnx2x_get_link_cfg_idx(struct bnx2x *bp) { @@ -4602,6 +4650,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); u32 flags = bp->flags; + u32 changes; bool bnx2x_reload = false; if (features & NETIF_F_LRO) @@ -4626,10 +4675,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) } } - if (flags ^ bp->flags) { - bp->flags = flags; + changes = flags ^ bp->flags; + + /* if GRO is changed while LRO is enabled, don't force a reload */ + if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) + changes &= ~GRO_ENABLE_FLAG; + + if (changes) bnx2x_reload = true; - } + + bp->flags = flags; if (bnx2x_reload) { if (bp->recovery_state == BNX2X_RECOVERY_DONE) @@ -4724,7 +4779,6 @@ int bnx2x_resume(struct pci_dev *pdev) return rc; } - void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { @@ -4742,7 +4796,6 @@ static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, u8 fw_sb_id, u8 sb_index, u8 ticks) { - u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); REG_WR8(bp, addr, ticks); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 151675d66b0d..c07a6d054cfe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -22,7 +22,6 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> - #include "bnx2x.h" #include "bnx2x_sriov.h" @@ -50,13 +49,25 @@ extern int int_mode; } \ } while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ -do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ - GFP_KERNEL | __GFP_ZERO); \ - if (x == NULL) \ - goto alloc_mem_err; \ -} while (0) +#define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ + GFP_KERNEL | __GFP_ZERO); \ + if (x == NULL) \ + goto alloc_mem_err; \ + DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + } while (0) + +#define BNX2X_PCI_FALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset((void *)x, 0xFFFFFFFF, size); \ 
+ DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\ + (unsigned long long)(*y), x); \ + } while (0) #define BNX2X_ALLOC(x, size) \ do { \ @@ -494,9 +505,6 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); /* Error handling */ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); -/* validate currect fw is loaded */ -bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err); - /* dev_close main block */ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); @@ -607,6 +615,13 @@ int bnx2x_enable_msi(struct bnx2x *bp); int bnx2x_poll(struct napi_struct *napi, int budget); /** + * bnx2x_low_latency_recv - LL callback + * + * @napi: napi structure + */ +int bnx2x_low_latency_recv(struct napi_struct *napi); + +/** * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure * * @bp: driver handle @@ -800,16 +815,18 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) return false; } +#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0) +#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF) static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) { - u16 rx_cons_sb; + u16 cons; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; - /* Tell compiler that status block fields can change */ - barrier(); - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - rx_cons_sb++; - return (fp->rx_comp_cons != rx_cons_sb); + cons = RCQ_BD(fp->rx_comp_cons); + cqe = &fp->rx_comp_ring[cons]; + cqe_fp = &cqe->fast_path_cqe; + return BNX2X_IS_CQE_COMPLETED(cqe_fp); } /** @@ -848,9 +865,11 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp) int i; /* Add NAPI objects */ - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_add_all_napi(struct bnx2x *bp) @@ -858,25 +877,31 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) int i; /* Add NAPI objects */ - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } int bnx2x_set_int_mode(struct bnx2x *bp); @@ -1171,7 +1196,6 @@ static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) { - /* the 'first' id is allocated for the cnic */ return bp->base_fw_ndsb; } @@ -1181,7 +1205,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) return bp->igu_base_sb; } - static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) { struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); @@ -1334,8 +1357,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu) int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); /* - * 1. number of frags should not grow above MAX_SKB_FRAGS - * 2. frag must fit the page + * 1. Number of frags should not grow above MAX_SKB_FRAGS + * 2. 
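BNX2X_PCI_FALLOC above fills freshly allocated completion-queue memory with 0xff bytes so every marker starts out "free", which is what lets BNX2X_IS_CQE_COMPLETED() reduce to a marker == 0 test. Note the 0xFFFFFFFF constant in the macro is truncated by memset() to a single 0xff byte per position, which happens to be exactly the fill wanted here; a quick standalone demonstration:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t words[4];

        /* memset() converts its value argument to unsigned char, so
         * 0xFFFFFFFF and 0xFF produce the same all-ones fill */
        memset(words, 0xFFFFFFFF, sizeof(words));
        printf("word[0] = 0x%08x\n", (unsigned int)words[0]); /* 0xffffffff */
        return 0;
    }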
Frag must fit the page */ return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 4b077a7f16af..0c94df47e0e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -253,7 +253,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, memset(&pg_help_data, 0, sizeof(struct pg_help_data)); - if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n"); @@ -298,7 +297,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, struct dcbx_pfc_feature *pfc, u32 error) { - if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n"); @@ -367,7 +365,6 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp, struct lldp_remote_mib *remote_mib ; struct lldp_local_mib *local_mib; - switch (read_mib_type) { case DCBX_READ_LOCAL_MIB: mib_size = sizeof(struct lldp_local_mib); @@ -629,7 +626,6 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) return 0; } - #ifdef BCM_DCBNL static inline u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) @@ -691,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) } /* setup tc must be called under rtnl lock, but we can't take it here - * as we are handling an attetntion on a work queue which must be + * as we are handling an attention on a work queue which must be * flushed at some rtnl-locked contexts (e.g. if down) */ if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) @@ -711,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) */ bnx2x_dcbnl_update_applist(bp, true); - /* Read rmeote mib if dcbx is in the FW */ + /* Read remote mib if dcbx is in the FW */ if (bnx2x_dcbx_read_shmem_remote_mib(bp)) return; #endif @@ -742,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_update_tc_mapping(bp); /* - * allow other funtions to update their netdevices + * allow other functions to update their netdevices * accordingly */ if (IS_MF(bp)) @@ -864,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); } - /*For IEEE admin_recommendation_bw_precentage + /*For IEEE admin_recommendation_bw_percentage *For IEEE admin_recommendation_ets_pg */ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { @@ -896,13 +892,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, } af->app.default_pri = (u8)dp->admin_default_priority; - } /* Write the data. 
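bnx2x_mtu_allows_gro(), quoted above, checks two constraints at once: the MTU must fit one SGE page, and the number of frags an aggregation can produce must stay within MAX_SKB_FRAGS. A standalone sketch of that arithmetic; the constant values below are illustrative stand-ins, not the driver's exact definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define SGE_PAGE_SIZE 4096
    #define ETH_MAX_TPA_HEADER_SIZE 120   /* illustrative header allowance */
    #define U_ETH_SGL_SIZE 8
    #define MAX_SKB_FRAGS 17

    /* every aggregated frag must fit a page and the total frag count
     * must stay within MAX_SKB_FRAGS */
    static bool mtu_allows_gro(int mtu)
    {
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
    }

    int main(void)
    {
        /* 1500 passes both checks; 9000 fails the page-size check */
        printf("1500: %d, 9000: %d\n", mtu_allows_gro(1500),
               mtu_allows_gro(9000));
        return 0;
    }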
*/ bnx2x_write_data(bp, (u32 *)&admin_mib, offset, sizeof(struct lldp_admin_mib)); - } void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) @@ -1076,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, bool pg_found = false; u32 i, traf_type, add_traf_type, add_pg; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - struct pg_entry_help_data *data = help_data->data; /*shotcut*/ + struct pg_entry_help_data *data = help_data->data; /*shortcut*/ /* Set to invalid */ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) @@ -1172,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); else /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[entry].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1181,7 +1176,6 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, BNX2X_ERR("dcbx error: Both groups must have priorities\n"); } - #ifndef POWER_OF_2 #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) #endif @@ -1284,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, } else { /* If there are only pauseable priorities or * only non-pauseable,* the lower priorities go - * to the first queue and the higherpriorities go + * to the first queue and the higher priorities go * to the second queue. */ cos_data->data[0].pausable = @@ -1484,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( * queue and one priority goes to the second queue. * * We will join this two cases: - * if one is BW limited it will go to the secoend queue + * if one is BW limited it will go to the second queue * otherwise the last priority will get it */ @@ -1504,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( false == b_found_strict) /* last entry will be handled separately * If no priority is strict than last - * enty goes to last queue.*/ + * entry goes to last queue. + */ entry = 1; cos_data->data[entry].pri_join_mask |= pri_tested; @@ -1516,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( b_found_strict = true; cos_data->data[1].pri_join_mask |= pri_tested; /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1524,7 +1520,6 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( } } - static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, @@ -1533,7 +1528,6 @@ static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, u32 pri_join_mask, u8 num_of_dif_pri) { - /* default E2 settings */ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; @@ -1629,7 +1623,6 @@ static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, u8 num_spread_of_entries, u8 strict_app_pris) { - if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, num_spread_of_entries, strict_app_pris)) { @@ -1848,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, void bnx2x_dcbx_pmf_update(struct bnx2x *bp) { - /* if we need to syncronize DCBX result from prev PMF + /* if we need to synchronize DCBX result from prev PMF * read it from shmem and update bp and netdev accordingly */ if (SHMEM2_HAS(bp, drv_flags) && @@ -1876,7 +1869,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp) * dcbx negotiation. 
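The POWER_OF_2() macro defined above relies on the classic x & (x - 1) identity: clearing the lowest set bit yields zero only when exactly one bit was set. A quick standalone check:

    #include <stdio.h>

    #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))

    int main(void)
    {
        for (unsigned int x = 0; x <= 9; x++)
            printf("%u -> %s\n", x, POWER_OF_2(x) ? "pow2" : "no");
        /* reports pow2 for 1, 2, 4 and 8 only; 0 is correctly rejected
         * by the first clause */
        return 0;
    }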
*/ bnx2x_dcbx_update_tc_mapping(bp); - } } @@ -1943,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, return; /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). * * up_map ignored */ @@ -1995,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, DP(BNX2X_MSG_DCB, "prio = %d\n", prio); /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). * * up_map ignored */ @@ -2389,7 +2381,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, *flags |= DCB_FEATCFG_ERROR; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); + BNX2X_ERR("Non valid feature-ID\n"); rval = 1; break; } @@ -2430,7 +2422,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, flags & DCB_FEATCFG_WILLING ? 1 : 0; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); + BNX2X_ERR("Non valid feature-ID\n"); rval = 1; break; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index d153f44cf8f9..125bd1b6586f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -134,8 +134,6 @@ enum { #define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130 #define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170 - - struct cos_entry_help_data { u32 pri_join_mask; u32 cos_bw; @@ -170,7 +168,6 @@ struct cos_help_data { (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri)))) - struct pg_entry_help_data { u8 num_of_dif_pri; u8 pg; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index bff5e33eaa14..12eb4baee9f6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -13,12 +13,6 @@ * consent. 
*/ - -/* This struct holds a signature to ensure the dump returned from the driver - * match the meta data file inserted to grc_dump.tcl - * The signature is time stamp, diag version and grc_dump version - */ - #ifndef BNX2X_DUMP_H #define BNX2X_DUMP_H @@ -28,7 +22,6 @@ #define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80 #define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80 - /* Possible Chips */ #define DUMP_CHIP_E1 1 #define DUMP_CHIP_E1H 2 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ce1a91618677..c5f225101684 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = ethtool_cmd_speed(cmd); - /* If recieved a request for an unknown duplex, assume full*/ + /* If received a request for an unknown duplex, assume full*/ if (cmd->duplex == DUPLEX_UNKNOWN) cmd->duplex = DUPLEX_FULL; @@ -733,7 +733,6 @@ static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, return false; } - static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, const struct wreg_addr *wreg_info) { @@ -850,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { - /* Read "paged" registes */ + /* Read "paged" registers */ bnx2x_read_pages_regs(bp, p, preset); } @@ -960,6 +959,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val) struct bnx2x *bp = netdev_priv(dev); /* Use the ethtool_dump "flag" field as the dump preset index */ + if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS) + return -EINVAL; + bp->dump_preset_idx = val->flag; return 0; } @@ -969,12 +971,12 @@ static int bnx2x_get_dump_flag(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); + dump->version = BNX2X_DUMP_VERSION; + dump->flag = bp->dump_preset_idx; /* Calculate the requested preset idx length */ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n", bp->dump_preset_idx, dump->len); - - dump->flag = ETHTOOL_GET_DUMP_DATA; return 0; } @@ -986,8 +988,6 @@ static int bnx2x_get_dump_data(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); struct dump_header dump_hdr = {0}; - memset(p, 0, dump->len); - /* Disable parity attentions as long as following dump may * cause false alarms by reading never written registers. We * will re-enable parity attentions right after the dump. @@ -1155,8 +1155,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) return bp->common.flash_size; } -/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had - * we done things the other way around, if two pfs from the same port would +/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, + * had we done things the other way around, if two pfs from the same port would * attempt to access nvram at the same time, we could run into a scenario such * as: * pf A takes the port lock. 
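bnx2x_set_dump() above now validates the preset index before storing it, since the ethtool flag field arrives straight from userspace. A minimal sketch of the same guard; DUMP_MAX_PRESETS is an illustrative value here, not necessarily the driver's:

    #include <stdio.h>

    #define DUMP_MAX_PRESETS 13   /* illustrative preset count */

    /* reject preset indices outside 1..DUMP_MAX_PRESETS before
     * storing them, as the added check in bnx2x_set_dump() does */
    static int set_dump_preset(unsigned int flag, unsigned int *preset_idx)
    {
        if (flag < 1 || flag > DUMP_MAX_PRESETS)
            return -1;   /* -EINVAL in the driver */
        *preset_idx = flag;
        return 0;
    }

    int main(void)
    {
        unsigned int idx = 0;

        printf("%d\n", set_dump_preset(0, &idx));       /* rejected */
        printf("%d %u\n", set_dump_preset(2, &idx), idx);
        return 0;
    }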
@@ -1381,12 +1381,29 @@ static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf, return rc; } +static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) +{ + int rc = 1; + u16 pm = 0; + struct net_device *dev = pci_get_drvdata(bp->pdev); + + if (bp->pm_cap) + rc = pci_read_config_word(bp->pdev, + bp->pm_cap + PCI_PM_CTRL, &pm); + + if ((rc && !netif_running(dev)) || + (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0))) + return false; + + return true; +} + static int bnx2x_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *eebuf) { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1411,7 +1428,7 @@ static int bnx2x_get_module_eeprom(struct net_device *dev, u8 *user_data = data; unsigned int start_addr = ee->offset, xfer_size = 0; - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1474,7 +1491,7 @@ static int bnx2x_get_module_info(struct net_device *dev, int phy_idx, rc; u8 sff8472_comp, diag_type; - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1594,8 +1611,10 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, */ val = be32_to_cpu(val_be); - val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset)); - val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset)); + val &= ~le32_to_cpu((__force __le32) + (0xff << BYTE_OFFSET(offset))); + val |= le32_to_cpu((__force __le32) + (*data_buf << BYTE_OFFSET(offset))); rc = bnx2x_nvram_write_dword(bp, align_offset, val, cmd_flags); @@ -1676,7 +1695,8 @@ static int bnx2x_set_eeprom(struct net_device *dev, int port = BP_PORT(bp); int rc = 0; u32 ext_phy_config; - if (!netif_running(dev)) { + + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1921,6 +1941,19 @@ static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = { "link_test (online) " }; +enum { + BNX2X_PRI_FLAG_ISCSI, + BNX2X_PRI_FLAG_FCOE, + BNX2X_PRI_FLAG_STORAGE, + BNX2X_PRI_FLAG_LEN, +}; + +static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { + "iSCSI offload support", + "FCoE offload support", + "Storage only interface" +}; + static u32 bnx2x_eee_to_adv(u32 eee_adv) { u32 modes = 0; @@ -2041,7 +2074,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME; - /* Restart link to propogate changes */ + /* Restart link to propagate changes */ if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_force_link_reset(bp); @@ -2160,7 +2193,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } }; - if (!netif_running(bp->dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return rc; @@ -2264,7 +2297,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) { NULL, 0xffffffff, {0, 0, 0, 0} } }; - if (!netif_running(bp->dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return rc; @@ -2978,32 
+3011,47 @@ static int bnx2x_num_stat_queues(struct bnx2x *bp) static int bnx2x_get_sset_count(struct net_device *dev, int stringset) { struct bnx2x *bp = netdev_priv(dev); - int i, num_stats; + int i, num_strings = 0; switch (stringset) { case ETH_SS_STATS: if (is_multi(bp)) { - num_stats = bnx2x_num_stat_queues(bp) * - BNX2X_NUM_Q_STATS; + num_strings = bnx2x_num_stat_queues(bp) * + BNX2X_NUM_Q_STATS; } else - num_stats = 0; + num_strings = 0; if (IS_MF_MODE_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) if (IS_FUNC_STAT(i)) - num_stats++; + num_strings++; } else - num_stats += BNX2X_NUM_STATS; + num_strings += BNX2X_NUM_STATS; - return num_stats; + return num_strings; case ETH_SS_TEST: return BNX2X_NUM_TESTS(bp); + case ETH_SS_PRIV_FLAGS: + return BNX2X_PRI_FLAG_LEN; + default: return -EINVAL; } } +static u32 bnx2x_get_private_flags(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 flags = 0; + + flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI; + flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE; + flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE; + + return flags; +} + static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); @@ -3026,7 +3074,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) continue; @@ -3045,6 +3092,12 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) start = 4; memcpy(buf, bnx2x_tests_str_arr + start, ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); + break; + + case ETH_SS_PRIV_FLAGS: + memcpy(buf, bnx2x_private_arr, + ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN); + break; } } @@ -3106,17 +3159,12 @@ static int bnx2x_set_phys_id(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; } - if (!bp->port.pmf) { - DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n"); - return -EOPNOTSUPP; - } - switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ @@ -3148,7 +3196,6 @@ static int bnx2x_set_phys_id(struct net_device *dev, static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) { - switch (info->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: @@ -3384,7 +3431,6 @@ static int bnx2x_set_channels(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - DP(BNX2X_MSG_ETHTOOL, "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", channels->rx_count, channels->tx_count, channels->other_count, @@ -3445,6 +3491,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_pauseparam = bnx2x_set_pauseparam, .self_test = bnx2x_self_test, .get_sset_count = bnx2x_get_sset_count, + .get_priv_flags = bnx2x_get_private_flags, .get_strings = bnx2x_get_strings, .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 12f00a40cdf0..5018e52ae2ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1323,6 +1323,8 @@ struct drv_func_mb { #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a + 
#define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 @@ -3816,7 +3818,8 @@ struct eth_fast_path_rx_cqe { __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[8]; + __le32 reserved1[7]; + u32 marker; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b4c9dea93a53..e5da07858a2f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -93,7 +93,6 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); - int num_queues; module_param(num_queues, int, 0); MODULE_PARM_DESC(num_queues, @@ -103,8 +102,6 @@ static int disable_tpa; module_param(disable_tpa, int, 0); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); -#define INT_MODE_INTx 1 -#define INT_MODE_MSI 2 int int_mode; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " @@ -122,8 +119,6 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Default debug msglevel"); - - struct workqueue_struct *bnx2x_wq; struct bnx2x_mac_vals { @@ -376,9 +371,11 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" #define DMAE_DP_DST_NONE "dst_addr [none]" -void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) +static void bnx2x_dp_dmae(struct bnx2x *bp, + struct dmae_command *dmae, int msglvl) { u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + int i; switch (dmae->opcode & DMAE_COMMAND_DST) { case DMAE_CMD_DST_PCI: @@ -434,6 +431,10 @@ void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) dmae->comp_val); break; } + + for (i = 0; i < (sizeof(struct dmae_command)/4); i++) + DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", + i, *(((u32 *)dmae) + i)); } /* copy command into DMAE command memory and set DMAE command go */ @@ -508,8 +509,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; int rc = 0; - /* - * Lock the dmae channel. Disable BHs to prevent a dead-lock + bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); + + /* Lock the dmae channel. Disable BHs to prevent a dead-lock * as long as this code is called both from syscall context and * from ndo_set_rx_mode() flow that may be called from BH. 
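bnx2x_is_nvm_accessible(), introduced in the ethtool hunks above, refuses NVRAM access unless the device is known to be powered: if the PM control word can be read it must report D0, otherwise the code falls back to the interface's running state. The same decision logic restated as a standalone function, with the register value and the two driver-side facts passed in as parameters:

    #include <stdbool.h>
    #include <stdio.h>

    #define PCI_PM_CTRL_STATE_MASK 0x0003  /* low two bits hold the D-state */
    #define PCI_D0 0

    /* read_failed and netif_running stand in for the real
     * pci_read_config_word() result and netif_running() call */
    static bool nvm_accessible(int read_failed, unsigned short pm_ctrl,
                               bool netif_running)
    {
        if (read_failed)
            return netif_running;
        return (pm_ctrl & PCI_PM_CTRL_STATE_MASK) == PCI_D0;
    }

    int main(void)
    {
        printf("%d\n", nvm_accessible(0, 0x0000, false)); /* D0: ok */
        printf("%d\n", nvm_accessible(0, 0x0003, true));  /* D3hot: no */
        printf("%d\n", nvm_accessible(1, 0, true));       /* unknown, up: ok */
        return 0;
    }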
*/ @@ -548,6 +550,7 @@ unlock: void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { @@ -571,11 +574,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, dmae.len = len32; /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); + bnx2x_panic(); + } } void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { @@ -603,7 +611,11 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) dmae.len = len32; /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); + bnx2x_panic(); + } } static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, @@ -811,8 +823,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) u32 val = REG_RD(bp, addr); /* in E1 we must use only PCI configuration space to disable - * MSI/MSIX capablility - * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + * MSI/MSIX capability + * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block */ if (CHIP_IS_E1(bp)) { /* Since IGU_PF_CONF_MSI_MSIX_EN still always on @@ -839,7 +851,7 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) REG_WR(bp, addr, val); if (REG_RD(bp, addr) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); } static void bnx2x_igu_int_disable(struct bnx2x *bp) @@ -857,7 +869,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp) REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); } static void bnx2x_int_disable(struct bnx2x *bp) @@ -917,7 +929,6 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) sp_sb_data.p_func.vf_valid, sp_sb_data.state); - for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; int loop; @@ -1016,7 +1027,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) hc_sm_p[j].timer_value); } - /* Indecies data */ + /* Indices data */ for (j = 0; j < loop; j++) { pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, hc_index_p[j].flags, @@ -1027,6 +1038,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) #ifdef BNX2X_STOP_ON_ERROR /* event queue */ + BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); for (i = 0; i < NUM_EQ_DESC; i++) { u32 *data = (u32 *)&bp->eq_ring[i].message.data; @@ -1111,7 +1123,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW * initialization. 
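bnx2x_dp_dmae() above gains a loop that dumps the whole command as raw 32-bit words in addition to the decoded fields. The same word-dump pattern in a standalone form; the four-field struct and its values are arbitrary examples, not the real dmae_command layout:

    #include <stdint.h>
    #include <stdio.h>

    struct dmae_cmd {
        uint32_t opcode;
        uint32_t src_addr_lo;
        uint32_t src_addr_hi;
        uint32_t len;
    };

    /* dump any command as raw 32-bit words, like the loop added
     * to bnx2x_dp_dmae() */
    static void dump_raw(const void *cmd, size_t size)
    {
        const uint32_t *w = cmd;

        for (size_t i = 0; i < size / 4; i++)
            printf("DMAE RAW [%02zu]: 0x%08x\n", i, (unsigned int)w[i]);
    }

    int main(void)
    {
        struct dmae_cmd cmd = { 0x00200063, 0x1000, 0x0, 0x40 };

        dump_raw(&cmd, sizeof(cmd));
        return 0;
    }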
*/ -#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ +#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ #define FLR_WAIT_INTERVAL 50 /* usec */ #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ @@ -1290,7 +1302,6 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); - /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); @@ -1305,11 +1316,9 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) - int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) { u32 op_gen_command = 0; - u32 comp_addr = BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); int ret = 0; @@ -1334,7 +1343,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) bnx2x_panic(); return 1; } - /* Zero completion for nxt FLR */ + /* Zero completion for next FLR */ REG_WR(bp, comp_addr, 0); return ret; @@ -1352,7 +1361,6 @@ u8 bnx2x_is_pcie_pending(struct pci_dev *dev) */ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) { - /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, CFC_REG_NUM_LCIDS_INSIDE_PF, @@ -1360,7 +1368,6 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) poll_cnt)) return 1; - /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_PF_USAGE_CNT, @@ -1390,7 +1397,7 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) /* Wait DMAE PF usage counter to zero */ if (bnx2x_flr_clnup_poll_hw_counter(bp, dmae_reg_go_c[INIT_DMAE_C(bp)], - "DMAE dommand register timed out", + "DMAE command register timed out", poll_cnt)) return 1; @@ -1770,7 +1777,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) break; case (RAMROD_CMD_ID_ETH_TERMINATE): - DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid); + DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_TERMINATE; break; @@ -1859,7 +1866,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); if (status & mask) { /* Handle Rx or Tx according to SB id */ - prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); @@ -1947,7 +1953,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) if (lock_status & resource_bit) return 0; - msleep(5); + usleep_range(5000, 10000); } BNX2X_ERR("Timeout\n"); return -EAGAIN; @@ -1982,8 +1988,8 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is currently taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (!(lock_status & resource_bit)) { - BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n", - lock_status, resource_bit); + BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. 
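bnx2x_acquire_hw_lock() above replaces msleep(5) with usleep_range(5000, 10000), giving the scheduler a window to coalesce wakeups instead of rounding a short sleep up to a full jiffy. The surrounding poll-until-timeout shape, sketched standalone with POSIX nanosleep() as the stand-in delay:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool resource_free(int attempt)
    {
        return attempt >= 3;   /* pretend it frees up on the 4th try */
    }

    /* poll-with-backoff loop in the spirit of bnx2x_acquire_hw_lock() */
    static int acquire(int max_tries)
    {
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };

        for (int i = 0; i < max_tries; i++) {
            if (resource_free(i))
                return 0;
            nanosleep(&ts, NULL);  /* ~5ms, like usleep_range(5000, 10000) */
        }
        return -1;   /* timeout, like the -EAGAIN path */
    }

    int main(void)
    {
        printf("acquire: %d\n", acquire(10));
        return 0;
    }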
Unlock was called but lock wasn't taken!\n", + lock_status, resource_bit); return -EFAULT; } @@ -1991,7 +1997,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) return 0; } - int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) { /* The GPIO should be swapped if swap register is set and active */ @@ -2347,14 +2352,13 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) return rc; } - /* Calculates the sum of vn_min_rates. It's needed for further normalizing of the min_rates. Returns: sum of vn_min_rates. or 0 - if all the min_rates are 0. - In the later case fainess algorithm should be deactivated. + In the later case fairness algorithm should be deactivated. If not all min_rates are zero then those that are zeroes will be set to 1. */ static void bnx2x_calc_vn_min(struct bnx2x *bp, @@ -2419,7 +2423,6 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, input->vnic_max_rate[vn] = vn_max_rate; } - static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) { if (CHIP_REV_IS_SLOW(bp)) @@ -2435,7 +2438,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); if (BP_NOMCP(bp)) - return; /* what should be the default bvalue in this case */ + return; /* what should be the default value in this case */ /* For 2 port configuration the absolute function number formula * is: @@ -2901,7 +2904,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) return rc; } - static void storm_memset_func_cfg(struct bnx2x *bp, struct tstorm_eth_function_common_config *tcfg, u16 abs_fid) @@ -2935,7 +2937,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) } /** - * bnx2x_get_tx_only_flags - Return common flags + * bnx2x_get_common_flags - Return common flags * * @bp device handle * @fp queue handle @@ -3006,7 +3008,6 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, if (IS_MF_AFEX(bp)) __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); - return flags | bnx2x_get_common_flags(bp, fp, true); } @@ -3082,7 +3083,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, * placed on the BD (not including paddings). */ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - - BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; + BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; @@ -3124,7 +3125,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, txq_init->fw_sb_id = fp->fw_sb_id; /* - * set the tss leading client id for TX classfication == + * set the tss leading client id for TX classification == * leading RSS client id */ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); @@ -3196,7 +3197,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); } - static void bnx2x_e1h_disable(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -3212,7 +3212,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); /* @@ -3540,10 +3540,8 @@ static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) return true; else return false; - } - /** * bnx2x_sp_post - place a single command on an SP ring * @@ -3608,14 +3606,13 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, /* * It's ok if the actual decrement is issued towards the memory * somewhere between the spin_lock and spin_unlock. Thus no - * more explict memory barrier is needed. 
+ * more explicit memory barrier is needed. */ if (common) atomic_dec(&bp->eq_spq_left); else atomic_dec(&bp->cq_spq_left); - DP(BNX2X_MSG_SP, "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), @@ -3637,15 +3634,14 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) might_sleep(); for (j = 0; j < 1000; j++) { - val = (1UL << 31); - REG_WR(bp, GRCBASE_MCP + 0x9c, val); - val = REG_RD(bp, GRCBASE_MCP + 0x9c); - if (val & (1L << 31)) + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); + val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); + if (val & MCPR_ACCESS_LOCK_LOCK) break; - msleep(5); + usleep_range(5000, 10000); } - if (!(val & (1L << 31))) { + if (!(val & MCPR_ACCESS_LOCK_LOCK)) { BNX2X_ERR("Cannot acquire MCP access lock register\n"); rc = -EBUSY; } @@ -3656,7 +3652,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) /* release split MCP access lock register */ static void bnx2x_release_alr(struct bnx2x *bp) { - REG_WR(bp, GRCBASE_MCP + 0x9c, 0); + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); } #define BNX2X_DEF_SB_ATT_IDX 0x0001 @@ -3678,7 +3674,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) rc |= BNX2X_DEF_SB_IDX; } - /* Do not reorder: indecies reading should complete before handling */ + /* Do not reorder: indices reading should complete before handling */ barrier(); return rc; } @@ -3827,8 +3823,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp) netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" "Please contact OEM Support for assistance\n"); - /* - * Schedule device reset (unload) + /* Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ @@ -3836,7 +3831,6 @@ static void bnx2x_fan_failure(struct bnx2x *bp) set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); smp_mb__after_clear_bit(); schedule_delayed_work(&bp->sp_rtnl_task, 0); - } static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -4106,7 +4100,7 @@ static void bnx2x_clear_reset_global(struct bnx2x *bp) */ static bool bnx2x_reset_is_global(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; @@ -4157,7 +4151,7 @@ void bnx2x_set_reset_in_progress(struct bnx2x *bp) */ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); u32 bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; @@ -4260,13 +4254,18 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } +static void _print_parity(struct bnx2x *bp, u32 reg) +{ + pr_cont(" [0x%08x] ", REG_RD(bp, reg)); +} + static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? 
", " : "", blk); } -static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4275,33 +4274,54 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "BRB"); + _print_parity(bp, + BRB1_REG_BRB1_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PARSER"); + _print_parity(bp, PRS_REG_PRS_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TSDM"); + _print_parity(bp, + TSDM_REG_TSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "SEARCHER"); + _print_parity(bp, SRC_REG_SRC_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TCM"); + _print_parity(bp, + TCM_REG_TCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TSEMI"); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_0); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XPB"); + _print_parity(bp, GRCBASE_XPB + + PB_REG_PB_PRTY_STS); + } break; } @@ -4313,8 +4333,9 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, return par_num; } -static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, - bool *global, bool print) +static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, + int par_num, bool *global, + bool print) { int i = 0; u32 cur_bit = 0; @@ -4323,37 +4344,66 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PBF"); + _print_parity(bp, PBF_REG_PBF_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "QM"); + _print_parity(bp, QM_REG_QM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TM"); + _print_parity(bp, TM_REG_TM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XSDM"); + _print_parity(bp, + XSDM_REG_XSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XCM"); + _print_parity(bp, XCM_REG_XCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XSEMI"); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_0); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DOORBELLQ"); + _print_parity(bp, + DORQ_REG_DORQ_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "NIG"); + if (CHIP_IS_E1x(bp)) { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS); + } else { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS_0); + _print_parity(bp, + 
NIG_REG_NIG_PRTY_STS_1); + } + } break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) @@ -4362,32 +4412,52 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, *global = true; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DEBUG"); + _print_parity(bp, DBG_REG_DBG_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "USDM"); + _print_parity(bp, + USDM_REG_USDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "UCM"); + _print_parity(bp, UCM_REG_UCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "USEMI"); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_0); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "UPB"); + _print_parity(bp, GRCBASE_UPB + + PB_REG_PB_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CSDM"); + _print_parity(bp, + CSDM_REG_CSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CCM"); + _print_parity(bp, CCM_REG_CCM_PRTY_STS); + } break; } @@ -4399,8 +4469,8 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, return par_num; } -static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4409,12 +4479,23 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CSEMI"); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_0); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PXP"); + _print_parity(bp, PXP_REG_PXP_PRTY_STS); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_0); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_1); + } break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) @@ -4422,24 +4503,42 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CFC"); + _print_parity(bp, + CFC_REG_CFC_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CDU"); + _print_parity(bp, CDU_REG_CDU_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DMAE"); + _print_parity(bp, + DMAE_REG_DMAE_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "IGU"); + if (CHIP_IS_E1x(bp)) + _print_parity(bp, + HC_REG_HC_PRTY_STS); + else + _print_parity(bp, + IGU_REG_IGU_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "MISC"); + _print_parity(bp, + MISC_REG_MISC_PRTY_STS); + } break; } @@ -4493,8 +4592,8 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 
return par_num; } -static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4503,12 +4602,18 @@ static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PGLUE_B"); + _print_parity(bp, + PGLUE_B_REG_PGLUE_B_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "ATC"); + _print_parity(bp, + ATC_REG_ATC_PRTY_STS); + } break; } @@ -4539,15 +4644,15 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, if (print) netdev_err(bp->dev, "Parity errors detected in blocks: "); - par_num = bnx2x_check_blocks_with_parity0( + par_num = bnx2x_check_blocks_with_parity0(bp, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); - par_num = bnx2x_check_blocks_with_parity1( + par_num = bnx2x_check_blocks_with_parity1(bp, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity2( + par_num = bnx2x_check_blocks_with_parity2(bp, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bnx2x_check_blocks_with_parity3( sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity4( + par_num = bnx2x_check_blocks_with_parity4(bp, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) @@ -4591,7 +4696,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) return bnx2x_parity_attn(bp, global, print, attn.sig); } - static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) { u32 val; @@ -4643,7 +4747,6 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); } - } static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) @@ -4878,7 +4981,6 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, BNX2X_ERR("Failed to schedule new commands: %d\n", rc); else if (rc > 0) DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); - } static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); @@ -5009,7 +5111,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) hw_cons = le16_to_cpu(*bp->eq_cons_sb); /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. - * when we get the the next-page we nned to adjust so the loop + * when we get the next-page we need to adjust so the loop * condition below will be met. 
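The hunks above make the same mechanical change to every bnx2x_check_blocks_with_parityN() walker: each one now takes the driver handle so that, besides naming a faulty block, it can dump that block's parity-status register through _print_parity(). The underlying pattern is a bit-scan dispatch over an attention signature word. A minimal, userspace-compilable model of that pattern; the BIT_* names, addresses, and the read_reg() stub are illustrative stand-ins for the driver's AEU bit definitions and REG_RD():

#include <stdio.h>
#include <stdint.h>

#define BIT_BRB   0x1u
#define BIT_PRS   0x2u
#define BIT_TSDM  0x4u

static uint32_t read_reg(uint32_t addr)
{
	/* stub: a real driver would read the block's parity-status register */
	return 0xdeadbeefu & addr;
}

static int check_blocks_with_parity(uint32_t sig, int par_num)
{
	uint32_t cur_bit;
	int i;

	for (i = 0; sig && i < 32; i++) {
		cur_bit = 1u << i;
		if (!(sig & cur_bit))
			continue;
		switch (cur_bit) {
		case BIT_BRB:
			printf("%s%s", par_num++ ? ", " : "", "BRB");
			printf("[0x%08x]", read_reg(0x60));
			break;
		case BIT_PRS:
			printf("%s%s", par_num++ ? ", " : "", "PARSER");
			printf("[0x%08x]", read_reg(0x70));
			break;
		case BIT_TSDM:
			printf("%s%s", par_num++ ? ", " : "", "TSDM");
			printf("[0x%08x]", read_reg(0x80));
			break;
		}
		sig &= ~cur_bit;	/* handled or unknown: drop the bit */
	}
	return par_num;			/* running count across AEU words */
}

int main(void)
{
	int n = check_blocks_with_parity(BIT_BRB | BIT_TSDM, 0);

	printf("\n%d parity block(s) reported\n", n);
	return 0;
}

Clearing each examined bit lets the loop exit as soon as the signature is exhausted, and the returned count threads through all five signature words so the block names print as one comma-separated list.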
The next element is the size of a * regular element and hence incrementing by 1 */ @@ -5075,8 +5177,6 @@ static void bnx2x_eq_int(struct bnx2x *bp) if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) break; - - goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: @@ -5218,7 +5318,7 @@ static void bnx2x_sp_task(struct work_struct *work) DP(BNX2X_MSG_SP, "sp task invoked\n"); - /* make sure the atomic interupt_occurred has been written */ + /* make sure the atomic interrupt_occurred has been written */ smp_rmb(); if (atomic_read(&bp->interrupt_occurred)) { @@ -5265,7 +5365,6 @@ static void bnx2x_sp_task(struct work_struct *work) /* ack status block only if something was actually handled */ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); - } /* must be called after the EQ processing (since eq leads to sriov @@ -5316,7 +5415,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) /* end of slow path */ - void bnx2x_drv_pulse(struct bnx2x *bp) { SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, @@ -5360,7 +5458,7 @@ static void bnx2x_timer(unsigned long data) /* sample pf vf bulletin board for new posts from pf */ if (IS_VF(bp)) - bnx2x_sample_bulletin(bp); + bnx2x_timer_sriov(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -5382,7 +5480,6 @@ static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) else for (i = 0; i < len; i++) REG_WR8(bp, addr + i, fill); - } /* helper: writes FP SP data to FW - data_size in dwords */ @@ -5461,10 +5558,8 @@ static void bnx2x_zero_sp_sb(struct bnx2x *bp) bnx2x_fill(bp, BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, CSTORM_SP_SYNC_BLOCK_SIZE); - } - static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { @@ -5474,7 +5569,6 @@ static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, hc_sm->time_to_expire = 0xFFFFFFFF; } - /* allocates state machine ids. */ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) { @@ -5700,7 +5794,7 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) bp->eq_cons = 0; bp->eq_prod = NUM_EQ_DESC; bp->eq_cons_sb = BNX2X_EQ_INDEX; - /* we want a warning message before it gets rought... */ + /* we want a warning message before it gets wrought... */ atomic_set(&bp->eq_spq_left, min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); } @@ -5784,7 +5878,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, break; case BNX2X_RX_MODE_PROMISC: - /* According to deffinition of SI mode, iface in promisc mode + /* According to definition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. 
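The bnx2x_fill_accept_flags() hunk just above only corrects spelling, but the function itself is a good illustration of how one rx_mode value fans out into separate RX and TX accept masks, including the SI-mode rule that a promiscuous interface still receives unmatched unicast frames. A compact sketch with hypothetical flag names (the driver's real bits live in the BNX2X_ACCEPT_* family):

#include <stdio.h>

enum rx_mode { RX_MODE_NONE, RX_MODE_NORMAL, RX_MODE_ALLMULTI, RX_MODE_PROMISC };

#define ACCEPT_UNICAST       (1u << 0)
#define ACCEPT_MULTICAST     (1u << 1)
#define ACCEPT_ALL_UNICAST   (1u << 2)
#define ACCEPT_ALL_MULTICAST (1u << 3)
#define ACCEPT_BROADCAST     (1u << 4)
#define ACCEPT_UNMATCHED     (1u << 5)

static int fill_accept_flags(enum rx_mode mode, unsigned *rx, unsigned *tx)
{
	*rx = *tx = 0;

	switch (mode) {
	case RX_MODE_NONE:
		break;				/* drop everything */
	case RX_MODE_NORMAL:
		*rx = ACCEPT_UNICAST | ACCEPT_MULTICAST | ACCEPT_BROADCAST;
		*tx = *rx;
		break;
	case RX_MODE_ALLMULTI:
		*rx = ACCEPT_UNICAST | ACCEPT_ALL_MULTICAST | ACCEPT_BROADCAST;
		*tx = *rx;
		break;
	case RX_MODE_PROMISC:
		/* matched and unmatched unicast are both accepted on rx */
		*rx = ACCEPT_ALL_UNICAST | ACCEPT_ALL_MULTICAST |
		      ACCEPT_BROADCAST | ACCEPT_UNMATCHED;
		*tx = ACCEPT_ALL_UNICAST | ACCEPT_ALL_MULTICAST |
		      ACCEPT_BROADCAST;
		break;
	default:
		return -1;			/* unknown mode */
	}
	return 0;
}

int main(void)
{
	unsigned rx, tx;

	if (!fill_accept_flags(RX_MODE_PROMISC, &rx, &tx))
		printf("promisc: rx=0x%02x tx=0x%02x\n", rx, tx);
	return 0;
}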
*/ @@ -5927,7 +6021,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) /* init shortcut */ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); - /* Setup SB indicies */ + /* Setup SB indices */ fp->rx_cons_sb = BNX2X_RX_SB_INDEX; /* Configure Queue State object */ @@ -5983,6 +6077,8 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); } + *txdata->tx_cons_sb = cpu_to_le16(0); + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); txdata->tx_db.data.zero_fill1 = 0; txdata->tx_db.data.prod = 0; @@ -6001,6 +6097,7 @@ static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) for_each_tx_queue_cnic(bp, i) bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); } + static void bnx2x_init_tx_rings(struct bnx2x *bp) { int i; @@ -6043,11 +6140,6 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp) bnx2x_init_rx_rings(bp); bnx2x_init_tx_rings(bp); - if (IS_VF(bp)) { - bnx2x_memset_stats(bp); - return; - } - if (IS_PF(bp)) { /* Initialize MOD_ABS interrupts */ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, @@ -6058,6 +6150,8 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp) bnx2x_init_def_sb(bp); bnx2x_update_dsb_idx(bp); bnx2x_init_sp_ring(bp); + } else { + bnx2x_memset_stats(bp); } } @@ -6236,7 +6330,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0x10) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x10) { @@ -6251,7 +6345,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 1) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x1) { @@ -6292,7 +6386,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0xb0) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0xb0) { @@ -6681,7 +6775,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry - * to the last enrty in the ILT. + * to the last entry in the ILT. * * Notes: * Currently the PF error in the PGLC is non recoverable. @@ -6772,7 +6866,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); - /* QM queues pointers table */ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); @@ -7013,7 +7106,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) u32 low, high; u32 val; - DP(NETIF_MSG_HW, "starting port init port %d\n", port); REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); @@ -7078,7 +7170,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0), 40); - bnx2x_init_block(bp, BLOCK_PRS, init_phase); if (CHIP_IS_E3B0(bp)) { if (IS_MF_AFEX(bp)) { @@ -7150,8 +7241,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: - * - SF mode: bits 3-7 are masked. only bits 0-2 are in use - * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use + * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(bp) ? 
0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ @@ -7275,7 +7366,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) msleep(20); - if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", @@ -7295,7 +7385,6 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) bnx2x_ilt_wr(bp, i, 0); } - static void bnx2x_init_searcher(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -7331,7 +7420,6 @@ static int bnx2x_reset_nic_mode(struct bnx2x *bp) int rc, i, port = BP_PORT(bp); int vlan_en = 0, mac_en[NUM_MACS]; - /* Close input from network */ if (bp->mf_mode == SINGLE_FUNCTION) { bnx2x_set_rx_filter(&bp->link_params, 0); @@ -7406,7 +7494,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp) bnx2x_ilt_init_op_cnic(bp, INITOP_SET); if (CONFIGURE_NIC_MODE(bp)) { - /* Configrue searcher as part of function hw init */ + /* Configure searcher as part of function hw init */ bnx2x_init_searcher(bp); /* Reset NIC mode */ @@ -7479,8 +7567,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) } else { /* Set NIC mode */ REG_WR(bp, PRS_REG_NIC_MODE, 1); - DP(NETIF_MSG_IFUP, "NIC MODE configrued\n"); - + DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); } if (!CHIP_IS_E1x(bp)) { @@ -7677,7 +7764,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) } bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); - /* !!! these should become driver const once + /* !!! These should become driver const once rf-tool supports split-68 const */ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); @@ -7734,7 +7821,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) return 0; } - void bnx2x_free_mem_cnic(struct bnx2x *bp) { bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); @@ -7779,7 +7865,6 @@ void bnx2x_free_mem(struct bnx2x *bp) bnx2x_iov_free_mem(bp); } - int bnx2x_alloc_mem_cnic(struct bnx2x *bp) { if (!CHIP_IS_E1x(bp)) @@ -7793,7 +7878,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp) host_hc_status_block_e1x)); if (CONFIGURE_NIC_MODE(bp) && !bp->t2) - /* allocate searcher T2 table, as it wan't allocated before */ + /* allocate searcher T2 table, as it wasn't allocated before */ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); /* write address to which L5 should insert its values */ @@ -8068,7 +8153,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); - } if (CNIC_SUPPORT(bp)) { @@ -8124,7 +8208,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) static void bnx2x_pf_q_prep_init(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) { - u8 cos; int cxt_index, cxt_offset; @@ -8133,7 +8216,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); - /* If HC is supporterd, enable host coalescing in the transition + /* If HC is supported, enable host coalescing in the transition * to INIT state. */ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); @@ -8205,7 +8288,6 @@ static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, return bnx2x_queue_state_change(bp, q_params); } - /** * bnx2x_setup_queue - setup queue * @@ -8254,7 +8336,6 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, DP(NETIF_MSG_IFUP, "init complete\n"); - /* Now move the Queue to the SETUP state... 
*/ memset(setup_params, 0, sizeof(*setup_params)); @@ -8315,7 +8396,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - /* close tx-only connections */ for (tx_index = FIRST_TX_ONLY_COS_INDEX; tx_index < fp->max_cos; @@ -8369,7 +8449,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) return bnx2x_queue_state_change(bp, &q_params); } - static void bnx2x_reset_func(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -8422,7 +8501,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) * scan to complete */ for (i = 0; i < 200; i++) { - msleep(10); + usleep_range(10000, 20000); if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) break; } @@ -8623,14 +8702,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction * 1. Sync IRS for default SB - * 2. Sync SP queue - this guarantes us that attention handling started - * 3. Wait, that TXdisable/enable transaction completes + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait, that TX disable/enable transaction completes * - * 1+2 guranty that if DCBx attention was scheduled it already changed - * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy - * received complettion for the transaction the state is TX_STOPPED. + * 1+2 guarantee that if DCBx attention was scheduled it already changed + * pending bit of transaction from STARTED-->TX_STOPPED, if we already + * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ @@ -8660,7 +8739,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; DP(NETIF_MSG_IFDOWN, - "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -8740,7 +8819,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_iov_chip_cleanup(bp); - /* * Send the UNLOAD_REQUEST to the MCP. This will return if * this function should perform FUNC, PORT or COMMON HW @@ -8750,7 +8828,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction */ rc = bnx2x_func_wait_started(bp); if (rc) { @@ -8813,7 +8891,6 @@ unload_error: if (rc) BNX2X_ERR("HW_RESET failed\n"); - /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, keep_link); } @@ -9179,7 +9256,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) return -EAGAIN; - /* TBD: Indicate that "process kill" is in progress to MCP */ /* Clear "unprepared" bit */ @@ -9367,7 +9443,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp) * the first leader that performs a * leader_reset() reset the global blocks in * order to clear global attentions. Otherwise - * the the gates will remain closed for that + * the gates will remain closed for that * engine. 
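Several hunks in this region replace msleep(10) with usleep_range(10000, 20000) inside bounded register-polling loops. The kernel's timer documentation recommends usleep_range() for delays in the tens-of-microseconds to ~20 ms range, since msleep() with a small argument can overshoot substantially on low-HZ kernels due to jiffy rounding. A userspace model of the same poll-with-timeout shape, with nanosleep() and a countdown stub standing in for usleep_range() and REG_RD():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* stub: pretends the hardware clears its 'scan on' flag after 3 polls */
static uint32_t read_scan_on(void)
{
	static int countdown = 3;

	return countdown-- > 0;
}

/* Poll until the flag clears, sleeping ~10 ms per try, with a hard bound
 * so a wedged device cannot hang the caller forever. */
static int wait_scan_done(int tries)
{
	const struct timespec delay = { .tv_sec = 0,
					.tv_nsec = 10 * 1000 * 1000 };

	while (tries--) {
		if (!read_scan_on())
			return 0;
		nanosleep(&delay, NULL);
	}
	return -1;	/* timed out: log it and fall back to recovery */
}

int main(void)
{
	printf("timer scan %s\n", wait_scan_done(200) ? "timed out" : "done");
	return 0;
}

The hard iteration bound matters as much as the sleep primitive: every such loop in the driver reports the failure and continues down a recovery path rather than spinning indefinitely on dead hardware.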
*/ if (load_status || @@ -9480,14 +9556,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) return; } - /* if stop on error is defined no recovery flows should be executed */ + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { #ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" - "you will need to reboot when done\n"); - goto sp_rtnl_not_reset; + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; #endif - - if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { /* * Clear all pending SP commands as we are going to reset the * function anyway. @@ -9502,6 +9576,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) } if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + /* * Clear all pending SP commands as we are going to reset the * function anyway. @@ -9540,6 +9620,13 @@ sp_rtnl_not_reset: "sending set mcast vf pf channel message from rtnl sp-task\n"); bnx2x_vfpf_set_mcast(bp->dev); } + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state)){ + if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { + bnx2x_tx_disable(bp); + BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n"); + } + } if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, &bp->sp_rtnl_state)) { @@ -9647,7 +9734,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; REG_WR(bp, vals->bmac_addr, wb_data[0]); REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); - } BNX2X_DEV_INFO("Disable emac Rx\n"); vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; @@ -9681,7 +9767,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, if (mac_stopped) msleep(20); - } #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) @@ -9780,6 +9865,21 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) return rc; } +bool bnx2x_port_after_undi(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *entry; + bool val; + + down(&bnx2x_prev_sem); + + entry = bnx2x_prev_path_get_entry(bp); + val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); + + up(&bnx2x_prev_sem); + + return val; +} + static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) { struct bnx2x_prev_path_list *tmp_list; @@ -9839,7 +9939,6 @@ static int bnx2x_do_flr(struct bnx2x *bp) u16 status; struct pci_dev *dev = bp->pdev; - if (CHIP_IS_E1x(bp)) { BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); return -EINVAL; @@ -9986,7 +10085,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) if (!timer_count) BNX2X_ERR("Failed to empty BRB, hope for the best\n"); - } /* No packets are in the pipeline, path is ready for reset */ @@ -10036,7 +10134,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp) { int time_counter = 10; u32 rc, fw, hw_lock_reg, hw_lock_val; - struct bnx2x_prev_path_list *prev_list; BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); /* clear hw from errors which may have resulted from an interrupted @@ -10049,7 +10146,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : (MISC_REG_DRIVER_CONTROL_7 + 
(BP_FUNC(bp) - 6) * 8); - hw_lock_val = (REG_RD(bp, hw_lock_reg)); + hw_lock_val = REG_RD(bp, hw_lock_reg); if (hw_lock_val) { if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); @@ -10064,7 +10161,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { BNX2X_DEV_INFO("Release previously held alr\n"); - REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); + bnx2x_release_alr(bp); } do { @@ -10093,7 +10190,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) break; } - /* non-common reply from MCP night require looping */ + /* non-common reply from MCP might require looping */ rc = bnx2x_prev_unload_uncommon(bp); if (rc != BNX2X_PREV_WAIT_NEEDED) break; @@ -10107,8 +10204,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) } /* Mark function if its port was used to boot from SAN */ - prev_list = bnx2x_prev_path_get_entry(bp); - if (prev_list && (prev_list->undi & (1 << BP_PORT(bp)))) + if (bnx2x_port_after_undi(bp)) bp->link_params.feature_config_flags |= FEATURE_CONFIG_BOOT_FROM_SAN; @@ -10192,8 +10288,6 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bnx2x_init_shmem(bp); - - bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); @@ -10455,6 +10549,9 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; } BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], @@ -10765,7 +10862,6 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp) */ if (!bp->cnic_eth_dev.max_iscsi_conn) bp->flags |= no_flags; - } static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) @@ -10782,12 +10878,56 @@ static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) bp->cnic_eth_dev.fcoe_wwn_node_name_lo = MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); } + +static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) +{ + u8 count = 0; + + if (IS_MF(bp)) { + u8 fid; + + /* iterate over absolute function ids for this path: */ + for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { + if (IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, + func_mf_config[fid].config); + + if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && + ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_FCOE)) + count++; + } else { + u32 cfg = MF_CFG_RD(bp, + func_ext_config[fid]. + func_cfg); + + if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && + (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) + count++; + } + } + } else { /* SF */ + int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1; + + for (port = 0; port < port_cnt; port++) { + u32 lic = SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn) ^ + FW_ENCODE_32BIT_PATTERN; + if (lic) + count++; + } + } + + return count; +} + static void bnx2x_get_fcoe_info(struct bnx2x *bp) { int port = BP_PORT(bp); int func = BP_ABS_FUNC(bp); u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_fcoe_conn); + u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); if (!CNIC_SUPPORT(bp)) { bp->flags |= NO_FCOE_FLAG; @@ -10801,9 +10941,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) /* Calculate the number of maximum allowed FCoE tasks */ bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; - if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp)) - bp->cnic_eth_dev.max_fcoe_exchanges /= - MAX_FCOE_FUNCS_PER_ENGINE; + + /* check if FCoE resources must be shared between different functions */ + if (num_fcoe_func) + bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; /* Read the WWN: */ if (!IS_MF(bp)) { @@ -11031,7 +11172,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else { bp->common.int_block = INT_BLOCK_IGU; - /* do not allow device reset during IGU info preocessing */ + /* do not allow device reset during IGU info processing */ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); @@ -11110,7 +11251,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) E1H_FUNC_MAX * sizeof(struct drv_func_mb); /* * get mf configuration: - * 1. existence of MF configuration + * 1. Existence of MF configuration * 2. MAC address must be legal (check only upper bytes) * for Switch-Independent mode; * OVLAN must be legal for Switch-Dependent mode @@ -11384,7 +11525,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->fw_mb_mutex); spin_lock_init(&bp->stats_lock); - INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); @@ -11393,7 +11533,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) if (rc) return rc; } else { - random_ether_addr(bp->dev->dev_addr); + eth_zero_addr(bp->dev->dev_addr); } bnx2x_set_modes_bitmap(bp); @@ -11417,7 +11557,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_prev_unload(bp); } - if (CHIP_REV_IS_FPGA(bp)) dev_err(&bp->pdev->dev, "FPGA detected\n"); @@ -11489,7 +11628,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) /* We need at least one default status block for slow-path events, * second status block for the L2 queue, and a third status block for - * CNIC if supproted. + * CNIC if supported. 
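Earlier in this hunk, the new bnx2x_shared_fcoe_funcs() counts how many functions on the path actually have FCoE enabled, and bnx2x_get_fcoe_info() then divides the engine-wide exchange pool by that count instead of by a worst-case constant, so disabled functions no longer shrink everyone else's share. A sketch of the accounting; the pool size and array-based config are illustrative, where the real code walks MF configuration or license keys:

#include <stdio.h>

#define FCOE_TASKS_PER_ENGINE 4096	/* illustrative pool size */

/* Count the functions with FCoE enabled, then split the engine-wide
 * exchange pool between them. */
static unsigned fcoe_exchanges_per_func(const int fcoe_enabled[], int nfuncs)
{
	unsigned count = 0;
	int i;

	for (i = 0; i < nfuncs; i++)
		if (fcoe_enabled[i])
			count++;

	return count ? FCOE_TASKS_PER_ENGINE / count
		     : FCOE_TASKS_PER_ENGINE;
}

int main(void)
{
	int cfg[8] = { 1, 0, 1, 0, 0, 0, 1, 0 };

	printf("%u exchanges per FCoE function\n",
	       fcoe_exchanges_per_func(cfg, 8));
	return 0;
}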
*/ if (CNIC_SUPPORT(bp)) bp->min_msix_vec_cnt = 3; @@ -11497,10 +11636,11 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->min_msix_vec_cnt = 2; BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); + bp->dump_preset_idx = 1; + return rc; } - /**************************************************************************** * General service functions ****************************************************************************/ @@ -11585,9 +11725,6 @@ static int bnx2x_close(struct net_device *dev) /* Unload the driver, release IRQs */ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); - /* Power off */ - bnx2x_set_power_state(bp, PCI_D3hot); - return 0; } @@ -11852,6 +11989,10 @@ static int bnx2x_validate_addr(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + /* query the bulletin board for mac address configured by the PF */ + if (IS_VF(bp)) + bnx2x_sample_bulletin(bp); + if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { BNX2X_ERR("Non-valid Ethernet address\n"); return -EADDRNOTAVAIL; @@ -11878,12 +12019,16 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_setup_tc = bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, - .ndo_set_vf_vlan = bnx2x_set_vf_vlan, + .ndo_set_vf_vlan = bnx2x_set_vf_vlan, .ndo_get_vf_config = bnx2x_get_vf_config, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, #endif + +#ifdef CONFIG_NET_LL_RX_POLL + .ndo_busy_poll = bnx2x_low_latency_recv, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) @@ -11959,7 +12104,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, } if (IS_PF(bp)) { - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + bp->pm_cap = pdev->pm_cap; if (bp->pm_cap == 0) { dev_err(&bp->pdev->dev, "Cannot find power management capability, aborting\n"); @@ -12008,8 +12153,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, } BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); - bnx2x_set_power_state(bp, PCI_D0); - /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); @@ -12094,15 +12237,26 @@ err_out: return rc; } -static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed) +static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, + enum bnx2x_pci_bus_speed *speed) { - u32 val = 0; + u32 link_speed, val = 0; pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - /* return value of 1=2.5GHz 2=5GHz */ - *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; + link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; + + switch (link_speed) { + case 3: + *speed = BNX2X_PCI_LINK_SPEED_8000; + break; + case 2: + *speed = BNX2X_PCI_LINK_SPEED_5000; + break; + default: + *speed = BNX2X_PCI_LINK_SPEED_2500; + } } static int bnx2x_check_firmware(struct bnx2x *bp) @@ -12327,7 +12481,6 @@ static void bnx2x_release_firmware(struct bnx2x *bp) bp->firmware = NULL; } - static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { .init_hw_cmn_chip = bnx2x_init_hw_common_chip, .init_hw_cmn = bnx2x_init_hw_common, @@ -12465,7 +12618,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, { struct net_device *dev = NULL; struct bnx2x *bp; - int pcie_width, pcie_speed; + int pcie_width; + enum bnx2x_pci_bus_speed pcie_speed; int rc, max_non_def_sbs; int rx_count, tx_count, rss_count, doorbell_size; int max_cos_est; @@ -12605,7 +12759,6 @@ static int bnx2x_init_one(struct 
pci_dev *pdev, } BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); - if (!NO_FCOE(bp)) { /* Add storage MAC address */ rtnl_lock(); @@ -12617,15 +12770,15 @@ static int bnx2x_init_one(struct pci_dev *pdev, BNX2X_DEV_INFO("got pcie width %d and speed %d\n", pcie_width, pcie_speed); - BNX2X_DEV_INFO( - "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", - board_info[ent->driver_data].name, - (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), - pcie_width, - ((!CHIP_IS_E2(bp) && pcie_speed == 2) || - (CHIP_IS_E2(bp) && pcie_speed == 1)) ? - "5GHz (Gen2)" : "2.5GHz", - dev->base_addr, bp->pdev->irq, dev->dev_addr); + BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + board_info[ent->driver_data].name, + (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), + pcie_width, + pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" : + pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" : + pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" : + "Unknown", + dev->base_addr, bp->pdev->irq, dev->dev_addr); return 0; @@ -12647,17 +12800,11 @@ init_one_exit: return rc; } -static void bnx2x_remove_one(struct pci_dev *pdev) +static void __bnx2x_remove(struct pci_dev *pdev, + struct net_device *dev, + struct bnx2x *bp, + bool remove_netdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return; - } - bp = netdev_priv(dev); - /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); @@ -12670,7 +12817,17 @@ static void bnx2x_remove_one(struct pci_dev *pdev) bnx2x_dcbnl_update_applist(bp, true); #endif - unregister_netdev(dev); + /* Close the interface - either directly or implicitly */ + if (remove_netdev) { + unregister_netdev(dev); + } else { + rtnl_lock(); + if (netif_running(dev)) + bnx2x_close(dev); + rtnl_unlock(); + } + + bnx2x_iov_remove_one(bp); /* Power on: we can't let PCI layer write to us while we are in D3 */ if (IS_PF(bp)) @@ -12686,12 +12843,16 @@ static void bnx2x_remove_one(struct pci_dev *pdev) /* Make sure RESET task is not scheduled before continuing */ cancel_delayed_work_sync(&bp->sp_rtnl_task); - bnx2x_iov_remove_one(bp); - /* send message via vfpf channel to release the resources of this vf */ if (IS_VF(bp)) bnx2x_vfpf_release(bp); + /* Assumes no further PCIe PM changes will occur */ + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, bp->wol); + pci_set_power_state(pdev, PCI_D3hot); + } + if (bp->regview) iounmap(bp->regview); @@ -12706,7 +12867,8 @@ static void bnx2x_remove_one(struct pci_dev *pdev) } bnx2x_free_mem_bp(bp); - free_netdev(dev); + if (remove_netdev) + free_netdev(dev); if (atomic_read(&pdev->enable_cnt) == 1) pci_release_regions(pdev); @@ -12715,6 +12877,20 @@ static void bnx2x_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +static void bnx2x_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return; + } + bp = netdev_priv(dev); + + __bnx2x_remove(pdev, dev, bp, true); +} + static int bnx2x_eeh_nic_unload(struct bnx2x *bp) { bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; @@ -12747,19 +12923,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) return 0; } -static void bnx2x_eeh_recover(struct bnx2x *bp) -{ - u32 val; - - mutex_init(&bp->port.phy_mutex); - - - val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); - if ((val & 
(SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - BNX2X_ERR("BAD MCP validity signature\n"); -} - /** * bnx2x_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -12828,6 +12991,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) if (netif_running(dev)) { BNX2X_ERR("IO slot reset --> driver unload\n"); + + /* MCP should have been reset; Need to wait for validity */ + bnx2x_init_shmem(bp); + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 v; @@ -12849,7 +13016,7 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) bnx2x_prev_unload(bp); - /* We should have resetted the engine, so It's fair to + /* We should have reseted the engine, so It's fair to * assume the FW will no longer write to the bnx2x driver. */ bnx2x_squeeze_objects(bp); @@ -12886,8 +13053,6 @@ static void bnx2x_io_resume(struct pci_dev *pdev) rtnl_lock(); - bnx2x_eeh_recover(bp); - bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; @@ -12905,6 +13070,29 @@ static const struct pci_error_handlers bnx2x_err_handler = { .resume = bnx2x_io_resume, }; +static void bnx2x_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + netif_device_detach(dev); + rtnl_unlock(); + + /* Don't remove the netdevice, as there are scenarios which will cause + * the kernel to hang, e.g., when trying to remove bnx2i while the + * rootfs is mounted from SAN. + */ + __bnx2x_remove(pdev, dev, bp, false); +} + static struct pci_driver bnx2x_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnx2x_pci_tbl, @@ -12916,6 +13104,7 @@ static struct pci_driver bnx2x_pci_driver = { #ifdef CONFIG_BNX2X_SRIOV .sriov_configure = bnx2x_sriov_configure, #endif + .shutdown = bnx2x_shutdown, }; static int __init bnx2x_init(void) @@ -12941,11 +13130,12 @@ static int __init bnx2x_init(void) static void __exit bnx2x_cleanup(void) { struct list_head *pos, *q; + pci_unregister_driver(&bnx2x_pci_driver); destroy_workqueue(bnx2x_wq); - /* Free globablly allocated resources */ + /* Free globally allocated resources */ list_for_each_safe(pos, q, &bnx2x_prev_list) { struct bnx2x_prev_path_list *tmp = list_entry(pos, struct bnx2x_prev_path_list, list); @@ -12968,7 +13158,7 @@ module_exit(bnx2x_cleanup); * @bp: driver handle * @set: set or clear the CAM entry * - * This function will wait until the ramdord completion returns. + * This function will wait until the ramrod completion returns. * Return 0 if success, -ENODEV if ramrod doesn't return. */ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) @@ -12996,7 +13186,6 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) BUG_ON(bp->cnic_spq_pending < count); bp->cnic_spq_pending -= count; - for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) & SPE_HDR_CONN_TYPE) >> @@ -13169,7 +13358,6 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) bnx2x_cnic_sp_post(bp, 0); } - /* Called with netif_addr_lock_bh() taken. * Sets an rx_mode config for an iSCSI ETH client. * Doesn't block. 
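A few hunks up, bnx2x_cleanup() drains the globally allocated previous-path list with list_for_each_safe(), the list-walking form that tolerates freeing the node under the iterator. The same idea in plain C, with a hand-rolled singly linked list replacing the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct prev_path {
	struct prev_path *next;
	int path_id;
};

/* Free every node. The successor is saved before free(), for the same
 * reason the driver uses list_for_each_safe(): the walk must never touch
 * a node after it has been released. */
static void free_prev_paths(struct prev_path **head)
{
	struct prev_path *pos = *head, *next;

	while (pos) {
		next = pos->next;	/* grab the successor first */
		free(pos);
		pos = next;
	}
	*head = NULL;
}

int main(void)
{
	struct prev_path *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct prev_path *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->path_id = i;
		n->next = head;
		head = n;
	}
	free_prev_paths(&head);
	printf("list drained: %s\n", head ? "no" : "yes");
	return 0;
}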
@@ -13210,7 +13398,6 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) } } - static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) { struct bnx2x *bp = netdev_priv(dev); @@ -13398,7 +13585,6 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp) { struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_cid_ilt_lines(bp); cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; @@ -13434,7 +13620,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, BNX2X_ERR("CNIC-related load failed\n"); return rc; } - } bp->cnic_enabled = true; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index d22bc40091ec..8e627b886d7b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -35,6 +35,8 @@ #define ATC_REG_ATC_INT_STS_CLR 0x1101c0 /* [RW 5] Parity mask register #0 read/write */ #define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [R 5] Parity register #0 read */ +#define ATC_REG_ATC_PRTY_STS 0x1101cc /* [RC 5] Parity register #0 read clear */ #define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 /* [RW 19] Interrupt mask register #0 read/write */ @@ -2750,6 +2752,8 @@ #define PBF_REG_PBF_INT_STS 0x1401c8 /* [RW 20] Parity mask register #0 read/write */ #define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [R 28] Parity register #0 read */ +#define PBF_REG_PBF_PRTY_STS 0x1401d8 /* [RC 20] Parity register #0 read clear */ #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc /* [RW 16] The Ethernet type value for L2 tag 0 */ @@ -4517,6 +4521,8 @@ #define TM_REG_TM_INT_STS 0x1640f0 /* [RW 7] Parity mask register #0 read/write */ #define TM_REG_TM_PRTY_MASK 0x16410c +/* [R 7] Parity register #0 read */ +#define TM_REG_TM_PRTY_STS 0x164100 /* [RC 7] Parity register #0 read clear */ #define TM_REG_TM_PRTY_STS_CLR 0x164104 /* [RW 8] The event id for aggregated interrupt 0 */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 32a9609cc98b..8f03c984550f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -35,9 +35,9 @@ /** * bnx2x_exe_queue_init - init the Exe Queue object * - * @o: poiter to the object + * @o: pointer to the object * @exe_len: length - * @owner: poiter to the owner + * @owner: pointer to the owner * @validate: validate function pointer * @optimize: optimize function pointer * @exec: execute function pointer @@ -142,7 +142,6 @@ free_and_exit: spin_unlock_bh(&o->lock); return rc; - } static inline void __bnx2x_exe_queue_reset_pending( @@ -163,13 +162,11 @@ static inline void __bnx2x_exe_queue_reset_pending( static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o) { - spin_lock_bh(&o->lock); __bnx2x_exe_queue_reset_pending(bp, o); spin_unlock_bh(&o->lock); - } /** @@ -179,7 +176,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, * @o: queue * @ramrod_flags: flags * - * (Atomicy is ensured using the exe_queue->lock). + * (Atomicity is ensured using the exe_queue->lock). 
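The kernel-doc line above states that atomicity of bnx2x_exe_queue_step() is ensured by exe_queue->lock: under that lock, a step launches a new chunk of queued commands only when no previous chunk is still awaiting completion. A small pthread-based model of that rule; CHUNK and the two counters are illustrative, where the driver moves real command elements between lists, and the DRV_CLEAR_ONLY shortcut is omitted:

#include <pthread.h>
#include <stdio.h>

#define CHUNK 4	/* illustrative exe_chunk_len */

struct exe_queue {
	pthread_mutex_t lock;
	int queued;	/* commands waiting in the exe queue */
	int pending;	/* commands launched, completion outstanding */
};

/* One step: under the lock, launch at most one chunk, and only when the
 * previous chunk has fully completed. */
static int exe_queue_step(struct exe_queue *q)
{
	int n = 0;

	pthread_mutex_lock(&q->lock);
	if (!q->pending) {
		n = q->queued < CHUNK ? q->queued : CHUNK;
		q->queued -= n;
		q->pending += n;
	}
	pthread_mutex_unlock(&q->lock);
	return n;
}

int main(void)
{
	struct exe_queue q = { PTHREAD_MUTEX_INITIALIZER, 10, 0 };

	printf("step 1 launched %d\n", exe_queue_step(&q));	/* 4 */
	printf("step 2 launched %d\n", exe_queue_step(&q));	/* 0: pending */
	return 0;
}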
*/ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, @@ -192,8 +189,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, spin_lock_bh(&o->lock); - /* - * Next step should not be performed until the current is finished, + /* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW * which also implies there won't be any completion to clear the @@ -209,8 +205,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, } } - /* - * Run through the pending commands list and create a next + /* Run through the pending commands list and create a next * execution chunk. */ while (!list_empty(&o->exe_queue)) { @@ -220,8 +215,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, if (cur_len + elem->cmd_len <= o->exe_chunk_len) { cur_len += elem->cmd_len; - /* - * Prevent from both lists being empty when moving an + /* Prevent from both lists being empty when moving an * element. This will allow the call of * bnx2x_exe_queue_empty() without locking. */ @@ -241,14 +235,12 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) - /* - * In case of an error return the commands back to the queue - * and reset the pending_comp. + /* In case of an error return the commands back to the queue + * and reset the pending_comp. */ list_splice_init(&o->pending_comp, &o->exe_queue); else if (!rc) - /* - * If zero is returned, means there are no outstanding pending + /* If zero is returned, means there are no outstanding pending * completions and we may dismiss the pending list. */ __bnx2x_exe_queue_reset_pending(bp, o); @@ -308,7 +300,6 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state, /* can take a while if any port is running */ int cnt = 5000; - if (CHIP_REV_IS_EMUL(bp)) cnt *= 20; @@ -456,7 +447,6 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n", counter, next); next += stride + size; - } } return counter * ETH_ALEN; @@ -518,7 +508,6 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, return 0; } - /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * bnx2x_check_mac_del(struct bnx2x *bp, @@ -609,7 +598,6 @@ static bool bnx2x_check_move_always_err( return false; } - static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) { struct bnx2x_raw_obj *raw = &o->raw; @@ -626,7 +614,6 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) return rx_tx_flag; } - void bnx2x_set_mac_in_nig(struct bnx2x *bp, bool add, unsigned char *dev_addr, int index) { @@ -693,7 +680,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, * * @cid: connection id * @type: BNX2X_FILTER_XXX_PENDING - * @hdr: poiter to header to setup + * @hdr: pointer to header to setup * @rule_cnt: * * currently we always configure one rule and echo field to contain a CID and an @@ -707,7 +694,6 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, hdr->rule_cnt = (u8)rule_cnt; } - /* hw_config() callbacks */ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, @@ -723,8 +709,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; u8 *mac = 
elem->cmd_data.vlan_mac.u.mac.mac; - /* - * Set LLH CAM entry: currently only iSCSI and ETH macs are + /* Set LLH CAM entry: currently only iSCSI and ETH macs are * relevant. In addition, current implementation is tuned for a * single ETH MAC. * @@ -879,8 +864,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, struct bnx2x_raw_obj *raw = &o->raw; struct mac_configuration_cmd *config = (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, + /* 57710 and 57711 do not support MOVE command, * so it's either ADD or DEL */ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? @@ -960,7 +944,6 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; - /* Reset the ramrod data buffer for the first rule */ if (rule_idx == 0) memset(data, 0, sizeof(*data)); @@ -969,7 +952,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, &rule_entry->pair.header); - /* Set VLAN and MAC themselvs */ + /* Set VLAN and MAC themselves */ rule_entry->pair.vlan = cpu_to_le16(vlan); bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, &rule_entry->pair.mac_mid, @@ -1021,8 +1004,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, struct bnx2x_raw_obj *raw = &o->raw; struct mac_configuration_cmd *config = (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, + /* 57710 and 57711 do not support MOVE command, * so it's either ADD or DEL */ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? @@ -1046,7 +1028,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, * * @bp: device handle * @p: command parameters - * @ppos: pointer to the cooky + * @ppos: pointer to the cookie * * reconfigure next MAC/VLAN/VLAN-MAC element from the * previously configured elements list. @@ -1054,7 +1036,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken * into an account * - * pointer to the cooky - that should be given back in the next call to make + * pointer to the cookie - that should be given back in the next call to make * function handle the next element. If *ppos is set to NULL it will restart the * iterator. If returned *ppos == NULL this means that the last element has been * handled. @@ -1102,8 +1084,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp, return bnx2x_config_vlan_mac(bp, p); } -/* - * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a +/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a * pointer to an element with a specific criteria and NULL if such an element * hasn't been found. */ @@ -1187,8 +1168,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, return rc; } - /* - * Check if there is a pending ADD command for this + /* Check if there is a pending ADD command for this * MAC/VLAN/VLAN-MAC. Return an error if there is. */ if (exeq->get(exeq, elem)) { @@ -1196,8 +1176,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, return -EEXIST; } - /* - * TODO: Check the pending MOVE from other objects where this + /* TODO: Check the pending MOVE from other objects where this * object is a destination object. 
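bnx2x_validate_vlan_mac_add(), reflowed just above, rejects an ADD when an identical command is already queued, which is what the exeq->get() lookup checks. Stripped of the object plumbing, the check reduces to a linear scan of pending commands, sketched here for plain MACs:

#include <stdbool.h>
#include <string.h>

struct mac_cmd {
	unsigned char mac[6];
};

/* Before queuing an ADD, scan the commands already queued for an equal
 * MAC and report a duplicate if one is found. */
static bool add_is_duplicate(const struct mac_cmd *pending, int npending,
			     const unsigned char mac[6])
{
	int i;

	for (i = 0; i < npending; i++)
		if (!memcmp(pending[i].mac, mac, 6))
			return true;
	return false;
}

int main(void)
{
	struct mac_cmd pending[1] = { { { 0, 1, 2, 3, 4, 5 } } };
	const unsigned char dup[6] = { 0, 1, 2, 3, 4, 5 };

	return add_is_duplicate(pending, 1, dup) ? 0 : 1;  /* expect dup */
}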
*/ @@ -1240,8 +1219,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, return -EEXIST; } - /* - * Check if there are pending DEL or MOVE commands for this + /* Check if there are pending DEL or MOVE commands for this * MAC/VLAN/VLAN-MAC. Return an error if so. */ memcpy(&query_elem, elem, sizeof(query_elem)); @@ -1292,8 +1270,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; - /* - * Check if we can perform this operation based on the current registry + /* Check if we can perform this operation based on the current registry * state. */ if (!src_o->check_move(bp, src_o, dest_o, @@ -1302,8 +1279,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, return -EINVAL; } - /* - * Check if there is an already pending DEL or MOVE command for the + /* Check if there is an already pending DEL or MOVE command for the * source object or ADD command for a destination object. Return an * error if so. */ @@ -1392,7 +1368,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp, } /** - * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. + * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. * * @bp: device handle * @o: bnx2x_vlan_mac_obj @@ -1550,9 +1526,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem( /* Get a new CAM offset */ if (!o->get_cam_offset(o, ®_elem->cam_offset)) { - /* - * This shell never happen, because we have checked the - * CAM availiability in the 'validate'. + /* This shall never happen, because we have checked the + * CAM availability in the 'validate'. */ WARN_ON(1); kfree(reg_elem); @@ -1599,8 +1574,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, struct bnx2x_vlan_mac_registry_elem *reg_elem; enum bnx2x_vlan_mac_cmd cmd; - /* - * If DRIVER_ONLY execution is requested, cleanup a registry + /* If DRIVER_ONLY execution is requested, cleanup a registry * and exit. Otherwise send a ramrod to FW. */ if (!drv_only) { @@ -1609,11 +1583,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, /* Set pending */ r->set_pending(r); - /* Fill tha ramrod data */ + /* Fill the ramrod data */ list_for_each_entry(elem, exe_chunk, link) { cmd = elem->cmd_data.vlan_mac.cmd; - /* - * We will add to the target object in MOVE command, so + /* We will add to the target object in MOVE command, so * change the object for a CAM search. */ if (cmd == BNX2X_VLAN_MAC_MOVE) @@ -1646,12 +1619,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, idx++; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
*/ rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, @@ -1766,8 +1738,7 @@ int bnx2x_config_vlan_mac( return rc; } - /* - * If nothing will be executed further in this iteration we want to + /* If nothing will be executed further in this iteration we want to * return PENDING if there are pending commands */ if (!bnx2x_exe_queue_empty(&o->exe_queue)) @@ -1786,13 +1757,11 @@ int bnx2x_config_vlan_mac( return rc; } - /* - * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set * then user want to wait until the last command is done. */ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { - /* - * Wait maximum for the current exe_queue length iterations plus + /* Wait maximum for the current exe_queue length iterations plus * one (for the current pending command). */ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; @@ -1818,8 +1787,6 @@ int bnx2x_config_vlan_mac( return rc; } - - /** * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec * @@ -1829,7 +1796,7 @@ int bnx2x_config_vlan_mac( * @ramrod_flags: execution flags to be used for this deletion * * if the last operation has completed successfully and there are no - * moreelements left, positive value if the last operation has completed + * more elements left, positive value if the last operation has completed * successfully and there are more previously configured elements, negative * value is current operation has failed. */ @@ -1870,8 +1837,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, p.ramrod_flags = *ramrod_flags; p.user_req.cmd = BNX2X_VLAN_MAC_DEL; - /* - * Add all but the last VLAN-MAC to the execution queue without actually + /* Add all but the last VLAN-MAC to the execution queue without actually * execution anything. */ __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); @@ -1934,7 +1900,6 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, state, pstate, type); } - void bnx2x_init_mac_obj(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, u8 cl_id, u32 cid, u8 func_id, void *rdata, @@ -2048,8 +2013,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, /* CAM pool handling */ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; - /* - * CAM offset is relevant for 57710 and 57711 chips only which have a + /* CAM offset is relevant for 57710 and 57711 chips only which have a * single CAM for both MACs and VLAN-MAC pairs. So the offset * will be taken from MACs' pool object only. */ @@ -2092,7 +2056,6 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, bnx2x_execute_vlan_mac, bnx2x_exeq_get_vlan_mac); } - } /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ @@ -2117,12 +2080,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, struct tstorm_eth_mac_filter_config *mac_filters = (struct tstorm_eth_mac_filter_config *)p->rdata; - /* initial seeting is drop-all */ + /* initial setting is drop-all */ u8 drop_all_ucast = 1, drop_all_mcast = 1; u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; u8 unmatched_unicast = 0; - /* In e1x there we only take into account rx acceot flag since tx switching + /* In e1x there we only take into account rx accept flag since tx switching * isn't enabled. 
*/ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) /* accept matched ucast */ @@ -2245,7 +2208,6 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, } cmd->state = cpu_to_le16(state); - } static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, @@ -2286,9 +2248,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, false); } - - /* - * If FCoE Queue configuration has been requested configure the Rx and + /* If FCoE Queue configuration has been requested configure the Rx and * internal switching modes for this queue in separate rules. * * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: @@ -2324,8 +2284,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, } } - /* - * Set the ramrod header (most importantly - number of rules to + /* Set the ramrod header (most importantly - number of rules to * configure). */ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); @@ -2334,12 +2293,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -2476,7 +2434,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, cur_mac = (struct bnx2x_mcast_mac_elem *) ((u8 *)new_cmd + sizeof(*new_cmd)); - /* Push the MACs of the current command into the pendig command + /* Push the MACs of the current command into the pending command * MACs list: FIFO */ list_for_each_entry(pos, &p->mcast_list, link) { @@ -2909,7 +2867,6 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* Increase the total number of MACs pending to be configured */ @@ -3034,20 +2991,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, if (!o->total_pending_num) bnx2x_mcast_refresh_registry_e2(bp, o); - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -3121,7 +3076,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, } } -/* On 57711 we write the multicast MACs' aproximate match +/* On 57711 we write the multicast MACs' approximate match * table by directly into the TSTORM's internal RAM. So we don't * really need to handle any tricks to make it work. 
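The comment above describes 57711's multicast filtering: an approximate-match table written directly into TSTORM internal RAM. Such a table behaves like a Bloom-filter bin array, where hashing a multicast MAC selects one bit and a set bit admits the frame; false positives cost only a software drop, while false negatives must never occur. A self-contained sketch; the CRC and the choice of which CRC bits pick the bin are illustrative, not the firmware's exact formula:

#include <stdint.h>
#include <stdio.h>

/* plain bit-by-bit CRC-32 (IEEE, reflected), standing in for the CRC the
 * firmware interface uses */
static uint32_t crc32_mac(const uint8_t *mac, int len)
{
	uint32_t crc = ~0u;
	int i, b;

	for (i = 0; i < len; i++) {
		crc ^= mac[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return ~crc;
}

/* 256-bin approximate-match filter: hash each multicast MAC to one bit */
static void mcast_set_bin(uint8_t bins[32], const uint8_t mac[6])
{
	unsigned bit = crc32_mac(mac, 6) & 0xff;	/* illustrative pick */

	bins[bit / 8] |= 1u << (bit % 8);
}

int main(void)
{
	uint8_t bins[32] = { 0 };
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	mcast_set_bin(bins, mac);
	printf("bin byte 0x%02x\n", bins[(crc32_mac(mac, 6) & 0xff) / 8]);
	return 0;
}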
*/ @@ -3223,7 +3178,6 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* We want to ensure that commands are executed one by one for 57710. @@ -3245,7 +3199,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp, /* If current command hasn't been handled yet and we are * here, it means that it's meant to be dropped and we have to - * update the number of outstandling MACs accordingly. + * update the number of outstanding MACs accordingly. */ if (p->mcast_list_len) o->total_pending_num -= o->max_cmd_len; @@ -3342,7 +3296,6 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( return -1; } - static inline int bnx2x_mcast_handle_pending_cmds_e1( struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) { @@ -3352,7 +3305,6 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1( union bnx2x_mcast_config_data cfg_data = {NULL}; int cnt = 0; - /* If nothing to be done - return */ if (list_empty(&o->pending_cmds_head)) return 0; @@ -3523,20 +3475,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, if (rc) return rc; - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -3550,7 +3500,6 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, /* Ramrod completion is pending */ return 1; } - } static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) @@ -3848,7 +3797,6 @@ static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, return true; } - static bool bnx2x_credit_pool_get_entry( struct bnx2x_credit_pool_obj *o, int *offset) @@ -3999,8 +3947,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equally divided between all active functions * on the PATH. */ if ((func_num > 0)) { @@ -4009,8 +3956,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, else cam_sz = BNX2X_CAM_SIZE_EMUL; - /* - * No need for CAM entries handling for 57712 and + /* No need for CAM entries handling for 57712 and * newer. */ bnx2x_init_credit_pool(p, -1, cam_sz); @@ -4018,7 +3964,6 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, /* this should never happen! Block MAC operations. */ bnx2x_init_credit_pool(p, 0, 0); } - } } @@ -4028,14 +3973,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, u8 func_num) { if (CHIP_IS_E1x(bp)) { - /* - * There is no VLAN credit in HW on 57710 and 57711 only + /* There is no VLAN credit in HW on 57710 and 57711, only * MAC / MAC-VLAN can be set */ bnx2x_init_credit_pool(p, 0, -1); } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equally divided between all active functions * on the PATH.
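The credit-pool hunks above note that CAM credit is divided equally between all active functions on the path. A toy calculation of the per-function share; the total below is illustrative only, the real sizes come from the chip-specific BNX2X_CAM_SIZE_* constants:

#include <stdio.h>

/* illustrative total; the driver derives this per chip family */
#define CAM_SIZE_TOTAL 272

/* each PCI function on the path gets an equal slice of the shared CAM */
static int cam_credit_per_func(int func_num)
{
        return func_num > 0 ? CAM_SIZE_TOTAL / func_num : 0;
}

int main(void)
{
        for (int f = 1; f <= 4; f <<= 1)
                printf("%d function(s): %d CAM entries each\n",
                       f, cam_credit_per_func(f));
        return 0;
}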
*/ if (func_num > 0) { @@ -4051,7 +3994,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, /** * bnx2x_debug_print_ind_table - prints the indirection table configuration. * - * @bp: driver hanlde + * @bp: driver handle * @p: pointer to rss configuration * * Prints it when NETIF_MSG_IFUP debug level is configured. @@ -4164,12 +4107,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4215,7 +4157,6 @@ int bnx2x_config_rss(struct bnx2x *bp, return rc; } - void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, u8 cl_id, u32 cid, u8 func_id, u8 engine_id, @@ -4288,7 +4229,6 @@ int bnx2x_queue_state_change(struct bnx2x *bp, return !!test_bit(pending_bit, pending); } - static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, struct bnx2x_queue_state_params *params) { @@ -4337,7 +4277,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, } if (o->next_tx_only >= o->max_cos) - /* >= becuase tx only must always be smaller than cos since the + /* >= because tx only must always be smaller than cos since the * primary connection supports COS 0 */ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", @@ -4403,7 +4343,6 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, gen_data->mtu = cpu_to_le16(params->mtu); gen_data->func_id = o->func_id; - gen_data->cos = params->cos; gen_data->traffic_type = @@ -4530,7 +4469,6 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, cpu_to_le16(params->silent_removal_value); rx_data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); - } /* initialize the general, tx and rx parts of a queue object */ @@ -4652,12 +4590,11 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
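The memory-barrier comment repeated through these hunks says the SPQ element must be fully written before the producer index that publishes it is updated, with the actual barrier supplied inside bnx2x_sp_post(). A minimal C11 model of that ordering, not the driver's real ring code:

#include <stdatomic.h>
#include <stdint.h>

struct spq {
        uint64_t elem[64];
        _Atomic unsigned prod;
};

/* The element payload is written first; the release store on the producer
 * index orders it against the consumer side, standing in for the full
 * barrier the driver issues inside bnx2x_sp_post(). */
static void sp_post(struct spq *q, uint64_t data)
{
        unsigned p = atomic_load_explicit(&q->prod, memory_order_relaxed);

        q->elem[p % 64] = data;                 /* plain write to the element */
        atomic_store_explicit(&q->prod, p + 1,
                              memory_order_release); /* publish afterwards */
}

int main(void)
{
        static struct spq q;

        sp_post(&q, 0xdeadbeef);
        return 0;
}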
*/ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], @@ -4681,12 +4618,11 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_e2(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], @@ -4706,7 +4642,6 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, &params->params.tx_only; u8 cid_index = tx_only_params->cid_index; - if (cid_index >= o->max_cos) { BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", o->cl_id, cid_index); @@ -4727,12 +4662,11 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, o->cids[cid_index], rdata->general.client_id, rdata->general.sp_client_id, rdata->general.cos); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], @@ -4761,7 +4695,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, &params->update_flags); - /* Outer VLAN sripping */ + /* Outer VLAN stripping */ data->outer_vlan_removal_enable_flg = test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags); data->outer_vlan_removal_change_flg = @@ -4816,19 +4750,17 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp, return -EINVAL; } - /* Clear the ramrod data */ memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data */ bnx2x_q_fill_update_data(bp, o, update_params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, @@ -5038,8 +4970,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, &params->params.update; u8 next_tx_only = o->num_tx_only; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested.
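The transition checks above refuse a new state change while a previous command is still pending, unless RAMROD_DRV_CLR_ONLY first wipes the pending state. A skeletal model of that gate, with field and flag names simplified:

#include <errno.h>

/* a new transition is refused with -EBUSY while a previous command is
 * pending, unless a DRV_CLR_ONLY-style request first clears that state */
struct sm { unsigned long pending; int state, next_state; };

static int chk_transition(struct sm *o, int clear_only, int want)
{
        if (clear_only) {
                o->pending = 0;
                o->next_state = -1;
        }
        if (o->pending)
                return -EBUSY;  /* still mid-transition */
        o->next_state = want;
        return 0;
}

int main(void)
{
        struct sm o = { .pending = 1 };

        return chk_transition(&o, 0, 2) == -EBUSY ? 0 : 1;
}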
*/ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { @@ -5047,8 +4978,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, o->next_state = BNX2X_Q_STATE_MAX; } - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. */ if (o->pending) { @@ -5257,8 +5187,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, if (o->pending) return BNX2X_F_STATE_MAX; - /* - * unsure the order of reading of o->pending and o->state + /* ensure the order of reading of o->pending and o->state * o->pending should be read first */ rmb(); @@ -5356,8 +5285,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; enum bnx2x_func_cmd cmd = params->cmd; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { @@ -5365,8 +5293,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, o->next_state = BNX2X_F_STATE_MAX; } - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. */ if (o->pending) @@ -5539,7 +5466,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, goto init_err; } - /* Handle the beginning of COMMON_XXX pases separatelly... */ + /* Handle the beginning of COMMON_XXX phases separately... */ switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: rc = bnx2x_func_init_cmn_chip(bp, drv); @@ -5573,7 +5500,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, init_err: drv->gunzip_end(bp); - /* In case of success, complete the comand immediatelly: no ramrods + /* In case of success, complete the command immediately: no ramrods * have been sent. */ if (!rc) @@ -5598,7 +5525,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp, } /** - * bnx2x_func_reset_port - reser HW at port stage + * bnx2x_func_reset_port - reset HW at port stage * * @bp: device handle * @drv: @@ -5620,7 +5547,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp, } /** - * bnx2x_func_reset_cmn - reser HW at common stage + * bnx2x_func_reset_cmn - reset HW at common stage * * @bp: device handle * @drv: @@ -5636,7 +5563,6 @@ static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, drv->reset_hw_cmn(bp); } - static inline int bnx2x_func_hw_reset(struct bnx2x *bp, struct bnx2x_func_state_params *params) { @@ -5663,7 +5589,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp, break; } - /* Complete the comand immediatelly: no ramrods have been sent. */ + /* Complete the command immediately: no ramrods have been sent. */ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 43c00bc84a08..798dfe996733 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -34,8 +34,7 @@ enum { RAMROD_RESTORE, /* Execute the next command now */ RAMROD_EXEC, - /* - * Don't add a new command and continue execution of posponed + /* Don't add a new command and continue execution of postponed * commands. If not set a new command will be added to the * pending commands list.
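For the RAMROD_* flags quoted above: COMP_WAIT is documented as a superset of EXEC, so testing either one decides whether a command runs immediately. A small sketch of the bit handling; the bit positions are stand-ins, not the driver's enum values:

#include <stdio.h>

/* stand-in bit positions mirroring the RAMROD_* enum layout */
enum { F_DRV_CLR_ONLY, F_RESTORE, F_EXEC, F_CONT, F_COMP_WAIT };

static inline void set_flag(unsigned long *flags, int bit)
{
        *flags |= 1UL << bit;
}

static inline int test_flag(unsigned long flags, int bit)
{
        return !!(flags & (1UL << bit));
}

int main(void)
{
        unsigned long ramrod_flags = 0;

        /* COMP_WAIT implies EXEC: execute now and also wait for completion */
        set_flag(&ramrod_flags, F_COMP_WAIT);
        if (test_flag(ramrod_flags, F_COMP_WAIT) ||
            test_flag(ramrod_flags, F_EXEC))
                printf("command executes immediately\n");
        return 0;
}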
*/ @@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd { struct bnx2x_vlan_mac_data { /* Requested command: BNX2X_VLAN_MAC_XX */ enum bnx2x_vlan_mac_cmd cmd; - /* - * used to contain the data related vlan_mac_flags bits from + /* used to contain the data related to vlan_mac_flags bits from * ramrod parameters. */ unsigned long vlan_mac_flags; @@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem * struct bnx2x_exeq_elem *elem); struct bnx2x_exe_queue_obj { - /* - * Commands pending for an execution. - */ + /* Commands pending for execution. */ struct list_head exe_queue; - /* - * Commands pending for an completion. - */ + /* Commands pending for completion. */ struct list_head pending_comp; spinlock_t lock; @@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj { }; /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ /* - * Element in the VLAN_MAC registry list having all currenty configured + * Element in the VLAN_MAC registry list having all currently configured * rules. */ struct bnx2x_vlan_mac_registry_elem { struct list_head link; - /* - * Used to store the cam offset used for the mac/vlan/vlan-mac. + /* Used to store the cam offset used for the mac/vlan/vlan-mac. * Relevant for 57710 and 57711 only. VLANs and MACs share the * same CAM for these chips. */ @@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj { * @param n number of elements to get * @param buf buffer preallocated by caller into which elements * will be copied. Note elements are 4-byte aligned - * so buffer size must be able to accomodate the + * so buffer size must be able to accommodate the * aligned elements. * * @return number of copied bytes @@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj { * @param bp * @param p Command parameters (RAMROD_COMP_WAIT bit in * ramrod_flags is only taken into account) - * @param ppos a pointer to the cooky that should be given back in the + * @param ppos a pointer to the cookie that should be given back in the * next call to make function handle the next element. If * *ppos is set to NULL it will restart the iterator. * If returned *ppos == NULL this means that the last @@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj { struct bnx2x_vlan_mac_registry_elem **ppos); /** - * Should be called on a completion arival. + * Should be called on a completion arrival. * * @param bp * @param o @@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp, /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ -/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in +/* RX_MODE ramrod special flags: set in rx_mode_flags field in * a bnx2x_rx_mode_ramrod_params. */ enum { @@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params { unsigned long ramrod_flags; unsigned long rx_mode_flags; - /* - * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to + /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to * a tstorm_eth_mac_filter_config (e1x). */ void *rdata; @@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj { /* Maximum allowed credit. put() will check against it. */ int pool_sz; - /* - * Allocate a pool table statically. + /* Allocate a pool table statically. * - * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272) + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) * - * The set bit in the table will mean that the entry is available. + * The set bit in the table will mean that the entry is available.
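pool_mirror above is a bitmap of u64 words in which a set bit marks a free entry. A self-contained sketch of the get/put pair such a pool implies; the word count and the GCC builtin are illustrative choices, not the driver's code:

#include <stdbool.h>
#include <stdint.h>

#define POOL_WORDS 5    /* enough for 272 entries; the driver derives this
                         * from MAX_MAC_CREDIT_E2 */

struct pool { uint64_t mirror[POOL_WORDS]; };

/* set bit == entry free: grab the first set bit, clear it, report its index */
static bool pool_get_entry(struct pool *p, int *offset)
{
        for (int w = 0; w < POOL_WORDS; w++) {
                if (!p->mirror[w])
                        continue;
                int b = __builtin_ctzll(p->mirror[w]);

                p->mirror[w] &= ~(1ULL << b);
                *offset = w * 64 + b;
                return true;
        }
        return false;   /* pool exhausted */
}

static void pool_put_entry(struct pool *p, int offset)
{
        p->mirror[offset / 64] |= 1ULL << (offset % 64);
}

int main(void)
{
        struct pool p = { .mirror = { ~0ULL, ~0ULL, ~0ULL, ~0ULL, 0 } };
        int off;

        if (pool_get_entry(&p, &off))
                pool_put_entry(&p, off);
        return 0;
}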
*/ #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; @@ -832,7 +823,7 @@ enum { BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; -/* Queue type options: queue type may be a compination of below. */ +/* Queue type options: queue type may be a combination of below. */ enum bnx2x_q_type { /** TODO: Consider moving both these flags into the init() * ramrod params. @@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj { u8 cl_id; u8 func_id; - /* - * number of traffic classes supported by queue. - * The primary connection of the queue suppotrs the first traffic - * class. Any further traffic class is suppoted by a tx-only + /* number of traffic classes supported by queue. + * The primary connection of the queue supports the first traffic + * class. Any further traffic class is supported by a tx-only * connection. * * Therefore max_cos is also a number of valid entries in the cids @@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj { /* BNX2X_Q_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. */ unsigned long pending; @@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj { /* BNX2X_FUNC_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. */ unsigned long pending; @@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp, * * @p: Command parameters * - * Return: 0 - if operation was successfull and there is no pending completions, + * Return: 0 - if operation was successful and there is no pending completions, * positive number - if there are pending completions, * negative - if there were errors */ @@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, * the current command will be enqueued to the tail of the * pending commands list. * - * Return: 0 is operation was successfull and there are no pending completions, + * Return: 0 is operation was successful and there are no pending completions, * negative if there were errors, positive if there are pending * completions. */ @@ -1377,7 +1367,6 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, struct bnx2x_credit_pool_obj *p, u8 func_id, u8 func_num); - /****************** RSS CONFIGURATION ****************/ void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2ce7c7471367..95861efb5051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, */ /* internal vf enable - until vf is enabled internally all transactions - * are blocked. this routine should always be called last with pretend. + * are blocked. This routine should always be called last with pretend. 
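Several verbs in the bnx2x_sp.h hunks above document the same return convention: zero means the operation finished with nothing pending, positive means a completion is still outstanding, negative means failure. A caller modeled on that contract, with a hypothetical issuer standing in for the config functions:

#include <stdio.h>

/* hypothetical issuer following the driver's return convention */
static int issue_config(int pending)
{
        return pending; /* 0, >0 or <0, as the kernel-doc describes */
}

int main(void)
{
        int rc = issue_config(1);

        if (rc < 0)
                printf("error %d\n", rc);
        else if (rc > 0)
                printf("ramrod sent, completion pending - caller must wait\n");
        else
                printf("done, nothing pending\n");
        return 0;
}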
*/ static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) { @@ -1459,21 +1459,16 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); if (!vf) - goto unknown_dev; + return false; dev = pci_get_bus_and_slot(vf->bus, vf->devfn); if (dev) return bnx2x_is_pcie_pending(dev); - -unknown_dev: return false; } int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) { - /* Wait 100ms */ - msleep(100); - /* Verify no pending pci transactions */ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) BNX2X_ERR("PCIE Transactions still pending\n"); @@ -1620,7 +1615,7 @@ next_vf_to_clean: i++) ; - DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i, + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, BNX2X_NR_VIRTFN(bp)); if (i < BNX2X_NR_VIRTFN(bp)) { @@ -1743,7 +1738,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); - /* set the number of VF alllowed doorbells to the full DQ range */ + /* set the number of VF allowed doorbells to the full DQ range */ REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); /* set the VF doorbell threshold */ @@ -2176,6 +2171,9 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); + /* let FLR complete ... */ + msleep(100); + /* initialize vf database */ for_each_vf(bp, vfid) { struct bnx2x_virtf *vf = BP_VF(bp, vfid); @@ -2403,7 +2401,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) /* extract vf and rxq index from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); @@ -2461,7 +2459,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) { /* extract the vf from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); return bnx2x_vf_by_abs_fid(bp, abs_vfid); @@ -2480,7 +2478,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, if (vf) { /* extract queue index from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); @@ -2705,7 +2703,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, } /* static allocation: - * the global maximum number are fixed per VF. fail the request if + * the global maximum number are fixed per VF. Fail the request if * requested number exceed these globals */ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { @@ -2777,6 +2775,10 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) vf->abs_vfid, vf->state); return -EINVAL; } + + /* let FLR complete ... */ + msleep(100); + /* FLR cleanup epilogue */ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) return -EBUSY; @@ -2890,7 +2892,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp, return -ENOMEM; } -/* VF release can be called either: 1. the VF was acquired but +/* VF release can be called either: 1. 
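The CID comments above rely on a fixed layout: the low window bits carry the queue index inside the VF and the next bits carry the absolute vfid, with at most 64 VFs per path. A sketch of that decomposition; the window width used here is illustrative, the real one is BNX2X_VF_CID_WND:

#include <stdio.h>

#define VF_CID_WND      4   /* illustrative window width */
#define MAX_NUM_OF_VFS 64

/* cid layout per the comment: low bits = queue index inside the VF,
 * next bits = absolute vfid (max 64 VFs per path) */
static void decode_cid(int cid, int *qidx, int *abs_vfid)
{
        *qidx     = cid & ((1 << VF_CID_WND) - 1);
        *abs_vfid = (cid >> VF_CID_WND) & (MAX_NUM_OF_VFS - 1);
}

int main(void)
{
        int q, vf;

        decode_cid(0x53, &q, &vf);
        printf("cid 0x53 -> vf %d queue %d\n", vf, q);
        return 0;
}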
The VF was acquired but * not enabled 2. the vf was enabled or in the process of being * enabled */ @@ -3024,7 +3026,6 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { - struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", @@ -3032,7 +3033,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) /* HW channel is only operational when PF is up */ if (bp->state != BNX2X_STATE_OPEN) { - BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down"); + BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); return -EINVAL; } @@ -3086,6 +3087,11 @@ void bnx2x_disable_sriov(struct bnx2x *bp) static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, struct bnx2x_virtf *vf) { + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("vf ndo called though PF is down\n"); + return -EINVAL; + } + if (!IS_SRIOV(bp)) { BNX2X_ERR("vf ndo called though sriov is disabled\n"); return -EINVAL; } @@ -3141,7 +3147,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, /* mac configured by ndo so its in bulletin board */ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); else - /* funtion has not been loaded yet. Show mac as 0s */ + /* function has not been loaded yet. Show mac as 0s */ memset(&ivi->mac, 0, ETH_ALEN); /* vlan */ @@ -3149,7 +3155,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, /* vlan configured by ndo so its in bulletin board */ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); else - /* funtion has not been loaded yet. Show vlans as 0s */ + /* function has not been loaded yet. Show vlans as 0s */ memset(&ivi->vlan, 0, VLAN_HLEN); } @@ -3189,7 +3195,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) return -EINVAL; } - /* update PF's copy of the VF's bulletin. will no longer accept mac + /* update PF's copy of the VF's bulletin. Will no longer accept mac * configuration requests from vf unless match this mac */ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; @@ -3358,8 +3364,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) return 0; } -/* crc is the first field in the bulletin board. compute the crc over the - * entire bulletin board excluding the crc field itself +/* crc is the first field in the bulletin board. Compute the crc over the + * entire bulletin board excluding the crc field itself. Use the length field + * as the Bulletin Board was posted by a PF with possibly a different version + * from the vf which will sample it. Therefore, the length is computed by the + * PF and then used blindly by the VF. */ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, struct pf_vf_bulletin_content *bulletin) @@ -3389,7 +3398,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, &bulletin)) break; - BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n", + BNX2X_ERR("bad crc on bulletin board.
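The new crc comment describes hashing the whole bulletin board except the leading crc field, trusting the PF-posted length. A toy checksum with the same skip-the-crc and use-the-posted-length behaviour; the hash itself is a stand-in for the crc32() the driver calls:

#include <stdint.h>
#include <stddef.h>

struct bulletin {
        uint32_t crc;     /* first field, excluded from the sum */
        uint32_t length;  /* posted by the PF, trusted blindly by the VF */
        uint8_t  payload[248];
};

/* toy hash standing in for crc32(); only the offset and length handling
 * mirror bnx2x_crc_vf_bulletin() */
static uint32_t bulletin_crc(const struct bulletin *b)
{
        const uint8_t *p = (const uint8_t *)b + sizeof(b->crc);
        uint32_t sum = 0;

        for (size_t i = 0; i < b->length - sizeof(b->crc); i++)
                sum = sum * 31 + p[i];
        return sum;
}

int main(void)
{
        struct bulletin b = { .length = sizeof(b) };

        b.crc = bulletin_crc(&b);
        return 0;
}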
Contained %x computed %x\n", bulletin.crc, bnx2x_crc_vf_bulletin(bp, &bulletin)); } @@ -3417,6 +3426,20 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) return PFVF_BULLETIN_UPDATED; } +void bnx2x_timer_sriov(struct bnx2x *bp) +{ + bnx2x_sample_bulletin(bp); + + /* if channel is down we need to self destruct */ + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } +} + void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { /* vf doorbells are embedded within the regview */ @@ -3452,7 +3475,7 @@ int bnx2x_open_epilog(struct bnx2x *bp) * register_netdevice which must have rtnl lock taken. As we are holding * the lock right now, that could only work if the probe would not take * the lock. However, as the probe of the vf may be called from other - * contexts as well (such as passthrough to vm failes) it can't assume + * contexts as well (such as passthrough to vm fails) it can't assume * the lock is being held for it. Using delayed work here allows the * probe code to simply take the lock (i.e. wait for it to be released * if it is being held). We only want to do this if the number of VFs @@ -3467,3 +3490,23 @@ int bnx2x_open_epilog(struct bnx2x *bp) return 0; } + +void bnx2x_iov_channel_down(struct bnx2x *bp) +{ + int vf_idx; + struct pf_vf_bulletin_content *bulletin; + + if (!IS_SRIOV(bp)) + return; + + for_each_vf(bp, vf_idx) { + /* locate this VFs bulletin board and update the channel down + * bit + */ + bulletin = BP_VF_BULLETIN(bp, vf_idx); + bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf_idx); + } +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d67ddc554c0f..d143a7cdbbbe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -197,7 +197,7 @@ struct bnx2x_virtf { u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ -#define VF_ACQUIRED 1 /* VF aquired, but not initalized */ +#define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ @@ -496,7 +496,7 @@ enum { else if ((next) == VFOP_VERIFY_PEND) \ BNX2X_ERR("expected pending\n"); \ else { \ - DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \ + DP(BNX2X_MSG_IOV, "no ramrod. 
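bnx2x_timer_sriov() above polls the bulletin and tears the VF down once the PF has raised CHANNEL_DOWN in valid_bitmap, and bnx2x_iov_channel_down() is what sets that bit on the PF side. The bit test in isolation, with the bit positions taken from the vfpf header in this series:

#include <stdio.h>

enum { MAC_ADDR_VALID, VLAN_VALID, CHANNEL_DOWN };

/* the PF raises CHANNEL_DOWN in each VF's bulletin; the VF side samples the
 * bitmap from its timer and schedules its own teardown when the bit appears */
static int channel_is_down(unsigned long valid_bitmap)
{
        return !!(valid_bitmap & 1UL << CHANNEL_DOWN);
}

int main(void)
{
        unsigned long bitmap = 1UL << MAC_ADDR_VALID | 1UL << CHANNEL_DOWN;

        if (channel_is_down(bitmap))
                printf("vf-pf channel down: schedule teardown work\n");
        return 0;
}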
Scheduling\n"); \ atomic_set(&vf->op_in_progress, 1); \ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ return; \ @@ -722,7 +722,6 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, struct pf_vf_bulletin_content *bulletin); int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); - enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); /* VF side vfpf channel functions */ @@ -752,6 +751,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, } enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); +void bnx2x_timer_sriov(struct bnx2x *bp); void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); int bnx2x_vf_pci_alloc(struct bnx2x *bp); int bnx2x_enable_sriov(struct bnx2x *bp); @@ -762,6 +762,7 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp) } void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); +void bnx2x_iov_channel_down(struct bnx2x *bp); int bnx2x_open_epilog(struct bnx2x *bp); #else /* CONFIG_BNX2X_SRIOV */ @@ -809,6 +810,7 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp { return PFVF_BULLETIN_UNCHANGED; } +static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { @@ -818,6 +820,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } +static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } #endif /* CONFIG_BNX2X_SRIOV */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 2ca3d94fcec2..98366abd02bd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1002,7 +1002,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; - UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index d117f472816c..853824d258e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -40,7 +40,6 @@ struct nig_stats { u32 egress_mac_pkt1_hi; }; - enum bnx2x_stats_event { STATS_EVENT_PMF = 0, STATS_EVENT_LINK_UP, @@ -208,7 +207,6 @@ struct bnx2x_eth_stats { u32 eee_tx_lpi; }; - struct bnx2x_eth_q_stats { u32 total_unicast_bytes_received_hi; u32 total_unicast_bytes_received_lo; @@ -331,7 +329,6 @@ struct bnx2x_fw_port_stats_old { u32 mac_discard; }; - /**************************************************************************** * Macros ****************************************************************************/ @@ -536,7 +533,6 @@ struct bnx2x_fw_port_stats_old { SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) - /* forward */ struct bnx2x; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 928b074d7d80..2088063151d6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -113,7 +113,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, 
u8 *done, dma_addr_t msg_mapping) { struct cstorm_vf_zone_data __iomem *zone_data = REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); - int tout = 600, interval = 100; /* wait for 60 seconds */ + int tout = 100, interval = 100; /* wait for 10 seconds */ if (*done) { BNX2X_ERR("done was non zero before message to pf was sent\n"); @@ -121,6 +121,16 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) return -EINVAL; } + /* if PF indicated channel is down avoid sending message. Return success + * so calling flow can continue + */ + bnx2x_sample_bulletin(bp); + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); + *done = PFVF_STATUS_SUCCESS; + return 0; + } + /* Write message address */ writel(U64_LO(msg_mapping), &zone_data->non_trigger.vf_pf_channel.msg_addr_lo); @@ -233,7 +243,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) attempts++; - /* test whether the PF accepted our request. If not, humble the + /* test whether the PF accepted our request. If not, humble * the request and try again. */ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { @@ -333,7 +343,7 @@ int bnx2x_vfpf_release(struct bnx2x *bp) DP(BNX2X_MSG_SP, "vf released\n"); } else { /* PF reports error */ - BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", + BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n", resp->hdr.status); rc = -EAGAIN; goto out; @@ -787,7 +797,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); } -/* enable vf_pf mailbox (aka vf-pf-chanell) */ +/* enable vf_pf mailbox (aka vf-pf-channel) */ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) { bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); @@ -844,7 +854,6 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, dmae.dst_addr_hi = vf_addr_hi; } dmae.len = len32; - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE); /* issue the command and wait for completion */ return bnx2x_issue_dmae_with_comp(bp, &dmae); @@ -1072,7 +1081,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); - /* outer vlan removal is set according to the PF's multi fuction mode */ + /* outer vlan removal is set according to PF's multi function mode */ if (IS_MF_SD(bp)) __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); } @@ -1104,7 +1113,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_p; struct bnx2x_queue_setup_params *setup_p; - /* reinit the VF operation context */ + /* re-init the VF operation context */ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); setup_p = &vf->op_params.qctor.prep_qsetup; init_p = &vf->op_params.qctor.qstate.params.init; @@ -1588,8 +1597,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, * support them. Or this may be because someone wrote a crappy * VF driver and is sending garbage over the channel. */ - BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", - mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); + BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. 
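The reworked bnx2x_send_msg2pf() above waits on *done in 100 steps of 100 ms, ten seconds total, instead of the previous sixty. A userspace model of that polling loop, assuming a plain flag rather than the DMA-written completion the driver uses:

#include <unistd.h>

/* poll *done every interval_ms, give up after tout iterations
 * (100 * 100 ms = 10 s in the driver) */
static int wait_for_done(volatile int *done, int tout, int interval_ms)
{
        while (tout--) {
                if (*done)
                        return 0;
                usleep(interval_ms * 1000);
        }
        return -1;      /* PF never answered */
}

int main(void)
{
        volatile int done = 1;

        return wait_for_done(&done, 100, 100);
}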
first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length, + vf->state); for (i = 0; i < 20; i++) DP_CONT(BNX2X_MSG_IOV, "%x ", mbx->msg->req.tlv_buf_size.tlv_buffer[i]); @@ -1605,8 +1615,11 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_resp(bp, vf); } else { /* can't send a response since this VF is unknown to us - * just unlock the channel and be done with. + * just ack the FW to release the mailbox and unlock + * the channel. */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + mmiowb(); bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 41708faab575..f3ad174a3a63 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -331,7 +331,10 @@ struct pf_vf_bulletin_content { #define VLAN_VALID 1 /* when set, the vf should not access * the vfpf channel */ - +#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not + * to attempt to send messages on the + * channel after this bit is set + */ u8 mac[ETH_ALEN]; u8 mac_padding[2]; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 6b0dc131b20e..d78d4cf140ed 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5622,7 +5622,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, static int cnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct cnic_dev *dev; int new_dev = 0; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index e80bfb60c3ef..c2777712da99 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2197,7 +2197,7 @@ static const struct net_device_ops sbmac_netdev_ops = { static int sbmac_init(struct platform_device *pldev, long long base) { - struct net_device *dev = dev_get_drvdata(&pldev->dev); + struct net_device *dev = platform_get_drvdata(pldev); int idx = pldev->id; struct sbmac_softc *sc = netdev_priv(dev); unsigned char *eaddr; @@ -2275,7 +2275,7 @@ static int sbmac_init(struct platform_device *pldev, long long base) dev->name); goto free_mdio; } - dev_set_drvdata(&pldev->dev, sc->mii_bus); + platform_set_drvdata(pldev, sc->mii_bus); err = register_netdev(dev); if (err) { @@ -2300,7 +2300,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) return 0; unreg_mdio: mdiobus_unregister(sc->mii_bus); - dev_set_drvdata(&pldev->dev, NULL); free_mdio: mdiobus_free(sc->mii_bus); uninit_ctx: @@ -2624,7 +2623,7 @@ static int sbmac_probe(struct platform_device *pldev) goto out_unmap; } - dev_set_drvdata(&pldev->dev, dev); + platform_set_drvdata(pldev, dev); SET_NETDEV_DEV(dev, &pldev->dev); sc = netdev_priv(dev); @@ -2649,7 +2648,7 @@ out_out: static int __exit sbmac_remove(struct platform_device *pldev) { - struct net_device *dev = dev_get_drvdata(&pldev->dev); + struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); unregister_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a13463e8a2c3..d964f302ac94 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -968,9 +968,6 @@ 
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) event = APE_EVENT_STATUS_STATE_UNLOAD; break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; default: return; } @@ -1317,8 +1314,8 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) if (err) return err; - if (enable) + if (enable) val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; else val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; @@ -1745,10 +1742,6 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) break; } } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. */ @@ -1770,9 +1763,6 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) break; } } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. */ @@ -2341,6 +2331,46 @@ static void tg3_phy_apply_otp(struct tg3 *tp) tg3_phy_toggle_auxctl_smdsp(tp, false); } +static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) +{ + u32 val; + struct ethtool_eee *dest = &tp->eee; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + if (eee) + dest = eee; + + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) + return; + + /* Pull eee_active */ + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { + dest->eee_active = 1; + } else + dest->eee_active = 0; + + /* Pull lp advertised settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) + return; + dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull advertised and eee_enabled settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) + return; + dest->eee_enabled = !!val; + dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull tx_lpi_enabled */ + val = tr32(TG3_CPMU_EEE_MODE); + dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); + + /* Pull lpi timer value */ + dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; +} + static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) { u32 val; @@ -2364,11 +2394,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) tw32(TG3_CPMU_EEE_CTRL, eeectl); - tg3_phy_cl45_read(tp, MDIO_MMD_AN, - TG3_CL45_D7_EEERES_STAT, &val); - - if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || - val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tg3_eee_pull_config(tp, NULL); + if (tp->eee.eee_active) tp->setlpicnt = 2; } @@ -4192,6 +4219,8 @@ static int tg3_power_down_prepare(struct tg3 *tp) tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); + return 0; } @@ -4292,6 +4321,16 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) /* Advertise 1000-BaseT EEE ability */ if (advertise & ADVERTISED_1000baseT_Full) val |= MDIO_AN_EEE_ADV_1000T; + + if (!tp->eee.eee_enabled) { + val = 0; + tp->eee.advertised = 0; + } else { + tp->eee.advertised = advertise & + (ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); if (err) val = 0; @@ -4536,26 +4575,23 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) static bool tg3_phy_eee_config_ok(struct tg3 *tp) { - u32 val; - u32 tgtadv = 0; - u32 advertising = tp->link_config.advertising; + struct ethtool_eee eee; if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) return true; - if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) - return false; - - val &= 
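tg3_eee_pull_config() above converts MDIO_AN_EEE_ADV register bits into ethtool advertisement flags via mmd_eee_adv_to_ethtool_adv_t(). A rough standalone equivalent for the two speeds tg3 cares about; the register bit positions follow IEEE 802.3 clause 45, while the ethtool flag values here are illustrative:

#include <stdio.h>
#include <stdint.h>

/* MDIO_AN_EEE_ADV bit positions (IEEE 802.3 clause 45) */
#define EEE_ADV_100TX  (1 << 1)
#define EEE_ADV_1000T  (1 << 2)
/* ethtool-style advertisement flags, illustrative values */
#define ADV_100_FULL   (1 << 3)
#define ADV_1000_FULL  (1 << 5)

/* rough equivalent of mmd_eee_adv_to_ethtool_adv_t() for 100TX and 1000T */
static uint32_t eee_adv_to_ethtool(uint16_t mmd)
{
        uint32_t adv = 0;

        if (mmd & EEE_ADV_100TX)
                adv |= ADV_100_FULL;
        if (mmd & EEE_ADV_1000T)
                adv |= ADV_1000_FULL;
        return adv;
}

int main(void)
{
        printf("0x%x\n", eee_adv_to_ethtool(EEE_ADV_100TX | EEE_ADV_1000T));
        return 0;
}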
(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); - - - if (advertising & ADVERTISED_100baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_100TX; - if (advertising & ADVERTISED_1000baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_1000T; + tg3_eee_pull_config(tp, &eee); - if (val != tgtadv) - return false; + if (tp->eee.eee_enabled) { + if (tp->eee.advertised != eee.advertised || + tp->eee.tx_lpi_timer != eee.tx_lpi_timer || + tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) + return false; + } else { + /* EEE is disabled but we're advertising */ + if (eee.advertised) + return false; + } return true; } @@ -4656,6 +4692,42 @@ static void tg3_clear_mac_status(struct tg3 *tp) udelay(40); } +static void tg3_setup_eee(struct tg3 *tp) +{ + u32 val; + + val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) + val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; + + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (tg3_asic_rev(tp) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + (tp->eee.tx_lpi_timer & 0xffff)); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); +} + static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) { bool current_link_up; @@ -4822,8 +4894,10 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) */ if (!eee_config_ok && (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && - !force_reset) + !force_reset) { + tg3_setup_eee(tp); tg3_phy_reset(tp); + } } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && @@ -6335,9 +6409,7 @@ static void tg3_tx_recover(struct tg3 *tp) "Please report the problem to the driver maintainer " "and include system chipset information.\n"); - spin_lock(&tp->lock); tg3_flag_set(tp, TX_RECOVERY_PENDING); - spin_unlock(&tp->lock); } static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) @@ -9205,11 +9277,9 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) } /* tp->lock is held. */ -static void tg3_rings_reset(struct tg3 *tp) +static void tg3_tx_rcbs_disable(struct tg3 *tp) { - int i; - u32 stblk, txrcb, rxrcb, limit; - struct tg3_napi *tnapi = &tp->napi[0]; + u32 txrcb, limit; /* Disable all transmit rings but the first. */ if (!tg3_flag(tp, 5705_PLUS)) @@ -9226,7 +9296,33 @@ static void tg3_rings_reset(struct tg3 *tp) txrcb < limit; txrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_tx_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 txrcb = NIC_SRAM_SEND_RCB; + + if (tg3_flag(tp, ENABLE_TSS)) + i++; + + for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->tx_ring) + continue; + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + } +} +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) +{ + u32 rxrcb, limit; /* Disable all receive return rings but the first. 
*/ if (tg3_flag(tp, 5717_PLUS)) @@ -9244,6 +9340,39 @@ static void tg3_rings_reset(struct tg3 *tp) rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 rxrcb = NIC_SRAM_RCV_RET_RCB; + + if (tg3_flag(tp, ENABLE_RSS)) + i++; + + for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->rx_rcb) + continue; + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk; + struct tg3_napi *tnapi = &tp->napi[0]; + + tg3_tx_rcbs_disable(tp); + + tg3_rx_ret_rcbs_disable(tp); /* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); @@ -9280,9 +9409,6 @@ static void tg3_rings_reset(struct tg3 *tp) tw32_tx_mbox(mbox + i * 8, 0); } - txrcb = NIC_SRAM_SEND_RCB; - rxrcb = NIC_SRAM_RCV_RET_RCB; - /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); @@ -9292,46 +9418,20 @@ static void tg3_rings_reset(struct tg3 *tp) tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tnapi->status_mapping & 0xffffffff)); - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - if (tnapi->rx_rcb) { - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT, 0); - rxrcb += TG3_BDINFO_SIZE; - } - stblk = HOSTCC_STATBLCK_RING1; for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { u64 mapping = (u64)tnapi->status_mapping; tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + stblk += 8; /* Clear status block in ram. 
*/ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - ((tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); - - stblk += 8; - rxrcb += TG3_BDINFO_SIZE; } + + tg3_tx_rcbs_init(tp); + tg3_rx_ret_rcbs_init(tp); } static void tg3_setup_rxbd_thresholds(struct tg3 *tp) @@ -9531,46 +9631,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_flag(tp, INIT_COMPLETE)) tg3_abort_hw(tp, 1); - /* Enable MAC control of LPI */ - if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { - val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | - TG3_CPMU_EEE_LNKIDL_UART_IDL; - if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) - val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; - - tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); - - tw32_f(TG3_CPMU_EEE_CTRL, - TG3_CPMU_EEE_CTRL_EXIT_20_1_US); - - val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | - TG3_CPMU_EEEMD_LPI_IN_TX | - TG3_CPMU_EEEMD_LPI_IN_RX | - TG3_CPMU_EEEMD_EEE_ENABLE; - - if (tg3_asic_rev(tp) != ASIC_REV_5717) - val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; - - if (tg3_flag(tp, ENABLE_APE)) - val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; - - tw32_f(TG3_CPMU_EEE_MODE, val); - - tw32_f(TG3_CPMU_EEE_DBTMR1, - TG3_CPMU_DBTMR1_PCIEXIT_2047US | - TG3_CPMU_DBTMR1_LNKIDLE_2047US); - - tw32_f(TG3_CPMU_EEE_DBTMR2, - TG3_CPMU_DBTMR2_APE_TX_2047US | - TG3_CPMU_DBTMR2_TXIDXEQ_2047US); - } - if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { tg3_phy_pull_config(tp); + tg3_eee_pull_config(tp, NULL); tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; } + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + tg3_setup_eee(tp); + if (reset_phy) tg3_phy_reset(tp); @@ -11226,7 +11297,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, */ err = tg3_alloc_consistent(tp); if (err) - goto err_out1; + goto out_ints_fini; tg3_napi_init(tp); @@ -11240,12 +11311,15 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } - goto err_out2; + goto out_napi_fini; } } tg3_full_lock(tp, 0); + if (init) + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + err = tg3_init_hw(tp, reset_phy); if (err) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@ -11255,7 +11329,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_full_unlock(tp); if (err) - goto err_out3; + goto out_free_irq; if (test_irq && tg3_flag(tp, USING_MSI)) { err = tg3_test_msi(tp); @@ -11266,7 +11340,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_free_rings(tp); tg3_full_unlock(tp); - goto err_out2; + goto out_napi_fini; } if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { @@ -11306,18 +11380,18 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, return 0; -err_out3: +out_free_irq: for (i = tp->irq_cnt - 1; i >= 0; i--) { struct tg3_napi *tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } -err_out2: +out_napi_fini: tg3_napi_disable(tp); tg3_napi_fini(tp); tg3_free_consistent(tp); -err_out1: +out_ints_fini: tg3_ints_fini(tp); return err; @@ -13362,11 +13436,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, struct tg3 *tp = netdev_priv(dev); bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; - if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && - 
tg3_power_up(tp)) { - etest->flags |= ETH_TEST_FL_FAILED; - memset(data, 1, sizeof(u64) * TG3_NUM_TEST); - return; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + if (tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); } memset(data, 0, sizeof(u64) * TG3_NUM_TEST); @@ -13657,6 +13733,57 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } +static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + if (edata->advertised != tp->eee.advertised) { + netdev_warn(tp->dev, + "Direct manipulation of EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { + netdev_warn(tp->dev, + "Maximal Tx Lpi timer supported is %#x(u)\n", + TG3_CPMU_DBTMR1_LNKIDLE_MAX); + return -EINVAL; + } + + tp->eee = *edata; + + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + tg3_warn_mgmt_link_flap(tp); + + if (netif_running(tp->dev)) { + tg3_full_lock(tp, 0); + tg3_setup_eee(tp); + tg3_phy_reset(tp); + tg3_full_unlock(tp); + } + + return 0; +} + +static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, + "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + *edata = tp->eee; + return 0; +} + static const struct ethtool_ops tg3_ethtool_ops = { .get_settings = tg3_get_settings, .set_settings = tg3_set_settings, @@ -13690,6 +13817,8 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_channels = tg3_get_channels, .set_channels = tg3_set_channels, .get_ts_info = tg3_get_ts_info, + .get_eee = tg3_get_eee, + .set_eee = tg3_set_eee, }; static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, @@ -15038,9 +15167,18 @@ static int tg3_phy_probe(struct tg3 *tp) (tg3_asic_rev(tp) == ASIC_REV_5717 && tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || (tg3_asic_rev(tp) == ASIC_REV_57765 && - tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) + tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + tp->eee.supported = SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full; + tp->eee.advertised = ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full; + tp->eee.eee_enabled = 1; + tp->eee.tx_lpi_enabled = 1; + tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; + } + tg3_phy_init_link_config(tp); if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && @@ -17112,7 +17250,7 @@ static int tg3_init_one(struct pci_dev *pdev, { struct net_device *dev; struct tg3 *tp; - int i, err, pm_cap; + int i, err; u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; @@ -17134,25 +17272,10 @@ static int tg3_init_one(struct pci_dev *pdev, pci_set_master(pdev); - /* Find power-management capability. 
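The new tg3_set_eee() above rejects direct edits of the advertised mask and caps the LPI timer at the hardware field width before accepting a configuration. The same guard order, extracted into a standalone validator; the struct is a cut-down stand-in for ethtool_eee:

#include <errno.h>
#include <stdint.h>

#define LPI_TIMER_MAX 0xffff    /* mirrors TG3_CPMU_DBTMR1_LNKIDLE_MAX */

struct eee_cfg { uint32_t advertised, tx_lpi_timer; };

/* same guard order as tg3_set_eee(): the advertisement must not be edited
 * directly, and the LPI timer must fit the hardware field */
static int validate_eee(const struct eee_cfg *cur, const struct eee_cfg *req)
{
        if (req->advertised != cur->advertised)
                return -EINVAL; /* direct manipulation not supported */
        if (req->tx_lpi_timer > LPI_TIMER_MAX)
                return -EINVAL; /* timer exceeds hardware maximum */
        return 0;
}

int main(void)
{
        struct eee_cfg cur = { 0x8, 1000 }, req = { 0x8, 70000 };

        return validate_eee(&cur, &req) ? 0 : 1;
}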
*/ - pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (pm_cap == 0) { - dev_err(&pdev->dev, - "Cannot find Power Management capability, aborting\n"); - err = -EIO; - goto err_out_free_res; - } - - err = pci_set_power_state(pdev, PCI_D0); - if (err) { - dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); - goto err_out_free_res; - } - dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { err = -ENOMEM; - goto err_out_power_down; + goto err_out_free_res; } SET_NETDEV_DEV(dev, &pdev->dev); @@ -17160,7 +17283,7 @@ static int tg3_init_one(struct pci_dev *pdev, tp = netdev_priv(dev); tp->pdev = pdev; tp->dev = dev; - tp->pm_cap = pm_cap; + tp->pm_cap = pdev->pm_cap; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; tp->irq_sync = 1; @@ -17498,9 +17621,6 @@ err_out_iounmap: err_out_free_dev: free_netdev(dev); -err_out_power_down: - pci_set_power_state(pdev, PCI_D3hot); - err_out_free_res: pci_release_regions(pdev); @@ -17610,6 +17730,8 @@ static int tg3_resume(struct device *device) tg3_full_lock(tp, 0); + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); @@ -17671,10 +17793,13 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, tg3_full_unlock(tp); done: - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + tg3_napi_enable(tp); + dev_close(netdev); err = PCI_ERS_RESULT_DISCONNECT; - else + } else { pci_disable_device(pdev); + } rtnl_unlock(); @@ -17720,6 +17845,10 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rc = PCI_ERS_RESULT_RECOVERED; done: + if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { + tg3_napi_enable(tp); + dev_close(netdev); + } rtnl_unlock(); return rc; @@ -17744,6 +17873,7 @@ static void tg3_io_resume(struct pci_dev *pdev) goto done; tg3_full_lock(tp, 0); + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, true); if (err) { @@ -17781,15 +17911,4 @@ static struct pci_driver tg3_driver = { .driver.pm = &tg3_pm_ops, }; -static int __init tg3_init(void) -{ - return pci_register_driver(&tg3_driver); -} - -static void __exit tg3_cleanup(void) -{ - pci_unregister_driver(&tg3_driver); -} - -module_init(tg3_init); -module_exit(tg3_cleanup); +module_pci_driver(tg3_driver); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index ff6e30eeae35..cd63d1189aae 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1175,6 +1175,7 @@ #define TG3_CPMU_EEE_DBTMR1 0x000036b4 #define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 #define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff +#define TG3_CPMU_DBTMR1_LNKIDLE_MAX 0x0000ffff #define TG3_CPMU_EEE_DBTMR2 0x000036b8 #define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 #define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff @@ -3372,6 +3373,7 @@ struct tg3 { unsigned int irq_cnt; struct ethtool_coalesce coal; + struct ethtool_eee eee; /* firmware info */ const char *fw_needed; diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index e423f82da490..b7d8127c198f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -164,7 +164,8 @@ struct bfa_ioc_attr { u8 port_mode; /*!< enum bfa_mode */ u8 cap_bm; /*!< capability */ u8 port_mode_cfg; /*!< enum bfa_mode */ - u8 rsvd[4]; /*!< 64bit align */ + 
u8 def_fn; /*!< 1 if default fn */ + u8 rsvd[3]; /*!< 64bit align */ }; /* Adapter capability mask definition */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index f2b73ffa9122..6f3cac060f29 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2371,7 +2371,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); ioc_attr->state = bfa_ioc_get_state(ioc); - ioc_attr->port_id = ioc->port_id; + ioc_attr->port_id = bfa_ioc_portid(ioc); ioc_attr->port_mode = ioc->port_mode; ioc_attr->port_mode_cfg = ioc->port_mode_cfg; @@ -2381,8 +2381,9 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); - ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; - ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; + ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); + ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); + ioc_attr->def_fn = bfa_ioc_is_default(ioc); bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 63a85e555df8..f04e0aab25b4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -222,6 +222,8 @@ struct bfa_ioc_hwif { #define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) #define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_is_default(__ioc) \ + (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc)) #define bfa_ioc_fetch_stats(__ioc, __stats) \ (((__stats)->drv_stats) = (__ioc)->stats) #define bfa_ioc_clr_stats(__ioc) \ diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 25dae757e9c4..f1eafc409bbd 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -455,6 +455,8 @@ void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); /* APIs for BNA */ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index db14f69d63bc..3ca77fad4851 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -298,7 +298,6 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) case BFI_ENET_I2H_RSS_ENABLE_RSP: case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: case BFI_ENET_I2H_RX_DEFAULT_RSP: - case BFI_ENET_I2H_MAC_UCAST_SET_RSP: case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: @@ -311,6 +310,12 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); break; + case BFI_ENET_I2H_MAC_UCAST_SET_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr); + break; + case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: bna_rx_from_rid(bna, msghdr->enet_id, rx); if (rx) diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index ea6f4a036401..57cd1bff59f1 100644 --- 
a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -711,6 +711,21 @@ bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) } void +bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rsp *rsp = + (struct bfi_enet_rsp *)msghdr; + + if (rsp->error) { + /* Clear ucast from cache */ + rxf->ucast_active_set = 0; + } + + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) { diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 07f7ef05c3f2..b78e69e0e52a 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2624,6 +2624,9 @@ bnad_stop(struct net_device *netdev) bnad_destroy_tx(bnad, 0); bnad_destroy_rx(bnad, 0); + /* These config flags are cleared in the hardware */ + bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC); + /* Synchronize mailbox IRQ */ bnad_mbox_irq_sync(bnad); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index c1d0bc059bfd..aefee77523f2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -71,7 +71,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.1.2.1" +#define BNAD_VERSION "3.2.21.1" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index 14ca9317c915..c37f706d9992 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -37,8 +37,8 @@ extern char bfa_version[]; -#define CNA_FW_FILE_CT "ctfw-3.1.0.0.bin" -#define CNA_FW_FILE_CT2 "ct2fw-3.1.0.0.bin" +#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin" #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ #pragma pack(1) diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 768285ec10f4..8030cc0396fd 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -23,7 +23,6 @@ if NET_CADENCE config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" depends on GENERIC_HARDIRQS && HAS_DMA - select NET_CORE select MACB ---help--- If you wish to compile a kernel for the AT91RM9200 and enable diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index cc9a185f0abb..bb5d63fb2e6d 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -29,7 +29,6 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_net.h> -#include <linux/pinctrl/consumer.h> #include "macb.h" @@ -309,7 +308,6 @@ static int __init at91ether_probe(struct platform_device *pdev) struct resource *regs; struct net_device *dev; struct phy_device *phydev; - struct pinctrl *pinctrl; struct macb *lp; int res; u32 reg; @@ -319,15 +317,6 @@ static int __init at91ether_probe(struct platform_device *pdev) if (!regs) return -ENOENT; - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) { - res = PTR_ERR(pinctrl); - if (res == -EPROBE_DEFER) - return res; - - dev_warn(&pdev->dev, "No pinctrl provided\n"); - } - dev = alloc_etherdev(sizeof(struct macb)); if (!dev) return -ENOMEM; @@ -435,7 +424,6 @@ static int at91ether_remove(struct 
platform_device *pdev) unregister_netdev(dev); clk_disable(lp->pclk); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index c89aa41dd448..e866608d7d91 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -32,7 +32,8 @@ #include "macb.h" -#define RX_BUFFER_SIZE 128 +#define MACB_RX_BUFFER_SIZE 128 +#define RX_BUFFER_MULTIPLE 64 /* bytes */ #define RX_RING_SIZE 512 /* must be power of 2 */ #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) @@ -92,7 +93,7 @@ static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) static void *macb_rx_buffer(struct macb *bp, unsigned int index) { - return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index); + return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); } void macb_set_hwaddr(struct macb *bp) @@ -528,6 +529,155 @@ static void macb_tx_interrupt(struct macb *bp) netif_wake_queue(bp->dev); } +static void gem_rx_refill(struct macb *bp) +{ + unsigned int entry; + struct sk_buff *skb; + struct macb_dma_desc *desc; + dma_addr_t paddr; + + while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { + u32 addr, ctrl; + + entry = macb_rx_ring_wrap(bp->rx_prepared_head); + desc = &bp->rx_ring[entry]; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + bp->rx_prepared_head++; + + if ((addr & MACB_BIT(RX_USED))) + continue; + + if (bp->rx_skbuff[entry] == NULL) { + /* allocate sk_buff for this free entry in ring */ + skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); + if (unlikely(skb == NULL)) { + netdev_err(bp->dev, + "Unable to allocate sk_buff\n"); + break; + } + bp->rx_skbuff[entry] = skb; + + /* now fill corresponding descriptor entry */ + paddr = dma_map_single(&bp->pdev->dev, skb->data, + bp->rx_buffer_size, DMA_FROM_DEVICE); + + if (entry == RX_RING_SIZE - 1) + paddr |= MACB_BIT(RX_WRAP); + bp->rx_ring[entry].addr = paddr; + bp->rx_ring[entry].ctrl = 0; + + /* properly align Ethernet header */ + skb_reserve(skb, NET_IP_ALIGN); + } + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", + bp->rx_prepared_head, bp->rx_tail); +} + +/* Mark DMA descriptors from begin up to and not including end as unused */ +static void discard_partial_frame(struct macb *bp, unsigned int begin, + unsigned int end) +{ + unsigned int frag; + + for (frag = begin; frag != end; frag++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + /* + * When this happens, the hardware stats registers for + * whatever caused this is updated, so we don't have to record + * anything. 
+ */ +} + +static int gem_rx(struct macb *bp, int budget) +{ + unsigned int len; + unsigned int entry; + struct sk_buff *skb; + struct macb_dma_desc *desc; + int count = 0; + + while (count < budget) { + u32 addr, ctrl; + + entry = macb_rx_ring_wrap(bp->rx_tail); + desc = &bp->rx_ring[entry]; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + + if (!(addr & MACB_BIT(RX_USED))) + break; + + desc->addr &= ~MACB_BIT(RX_USED); + bp->rx_tail++; + count++; + + if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { + netdev_err(bp->dev, + "not whole frame pointed by descriptor\n"); + bp->stats.rx_dropped++; + break; + } + skb = bp->rx_skbuff[entry]; + if (unlikely(!skb)) { + netdev_err(bp->dev, + "inconsistent Rx descriptor chain\n"); + bp->stats.rx_dropped++; + break; + } + /* now everything is ready for receiving packet */ + bp->rx_skbuff[entry] = NULL; + len = MACB_BFEXT(RX_FRMLEN, ctrl); + + netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); + + skb_put(skb, len); + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); + dma_unmap_single(&bp->pdev->dev, addr, + len, DMA_FROM_DEVICE); + + skb->protocol = eth_type_trans(skb, bp->dev); + skb_checksum_none_assert(skb); + + bp->stats.rx_packets++; + bp->stats.rx_bytes += skb->len; + +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", + skb->len, skb->csum); + print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb->mac_header, 16, true); + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb->data, 32, true); +#endif + + netif_receive_skb(skb); + } + + gem_rx_refill(bp); + + return count; +} + static int macb_rx_frame(struct macb *bp, unsigned int first_frag, unsigned int last_frag) { @@ -575,7 +725,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, skb_put(skb, len); for (frag = first_frag; ; frag++) { - unsigned int frag_len = RX_BUFFER_SIZE; + unsigned int frag_len = bp->rx_buffer_size; if (offset + frag_len > len) { BUG_ON(frag != last_frag); @@ -583,7 +733,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, } skb_copy_to_linear_data_offset(skb, offset, macb_rx_buffer(bp, frag), frag_len); - offset += RX_BUFFER_SIZE; + offset += bp->rx_buffer_size; desc = macb_rx_desc(bp, frag); desc->addr &= ~MACB_BIT(RX_USED); @@ -606,27 +756,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, return 0; } -/* Mark DMA descriptors from begin up to and not including end as unused */ -static void discard_partial_frame(struct macb *bp, unsigned int begin, - unsigned int end) -{ - unsigned int frag; - - for (frag = begin; frag != end; frag++) { - struct macb_dma_desc *desc = macb_rx_desc(bp, frag); - desc->addr &= ~MACB_BIT(RX_USED); - } - - /* Make descriptor updates visible to hardware */ - wmb(); - - /* - * When this happens, the hardware stats registers for - * whatever caused this is updated, so we don't have to record - * anything. 
- */ -} - static int macb_rx(struct macb *bp, int budget) { int received = 0; @@ -687,7 +816,7 @@ static int macb_poll(struct napi_struct *napi, int budget) netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", (unsigned long)status, budget); - work_done = macb_rx(bp, budget); + work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { napi_complete(napi); @@ -870,12 +999,71 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +static void macb_init_rx_buffer_size(struct macb *bp, size_t size) +{ + if (!macb_is_gem(bp)) { + bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; + } else { + bp->rx_buffer_size = size; + + if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { + netdev_dbg(bp->dev, + "RX buffer must be multiple of %d bytes, expanding\n", + RX_BUFFER_MULTIPLE); + bp->rx_buffer_size = + roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); + } + } + + netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", + bp->dev->mtu, bp->rx_buffer_size); +} + +static void gem_free_rx_buffers(struct macb *bp) +{ + struct sk_buff *skb; + struct macb_dma_desc *desc; + dma_addr_t addr; + int i; + + if (!bp->rx_skbuff) + return; + + for (i = 0; i < RX_RING_SIZE; i++) { + skb = bp->rx_skbuff[i]; + + if (skb == NULL) + continue; + + desc = &bp->rx_ring[i]; + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); + dma_unmap_single(&bp->pdev->dev, addr, skb->len, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = NULL; + } + + kfree(bp->rx_skbuff); + bp->rx_skbuff = NULL; +} + +static void macb_free_rx_buffers(struct macb *bp) +{ + if (bp->rx_buffers) { + dma_free_coherent(&bp->pdev->dev, + RX_RING_SIZE * bp->rx_buffer_size, + bp->rx_buffers, bp->rx_buffers_dma); + bp->rx_buffers = NULL; + } +} + static void macb_free_consistent(struct macb *bp) { if (bp->tx_skb) { kfree(bp->tx_skb); bp->tx_skb = NULL; } + bp->macbgem_ops.mog_free_rx_buffers(bp); if (bp->rx_ring) { dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, bp->rx_ring, bp->rx_ring_dma); @@ -886,12 +1074,37 @@ static void macb_free_consistent(struct macb *bp) bp->tx_ring, bp->tx_ring_dma); bp->tx_ring = NULL; } - if (bp->rx_buffers) { - dma_free_coherent(&bp->pdev->dev, - RX_RING_SIZE * RX_BUFFER_SIZE, - bp->rx_buffers, bp->rx_buffers_dma); - bp->rx_buffers = NULL; - } +} + +static int gem_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * sizeof(struct sk_buff *); + bp->rx_skbuff = kzalloc(size, GFP_KERNEL); + if (!bp->rx_skbuff) + return -ENOMEM; + else + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + RX_RING_SIZE, bp->rx_skbuff); + return 0; +} + +static int macb_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * bp->rx_buffer_size; + bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, + &bp->rx_buffers_dma, GFP_KERNEL); + if (!bp->rx_buffers) + return -ENOMEM; + else + netdev_dbg(bp->dev, + "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + return 0; } static int macb_alloc_consistent(struct macb *bp) @@ -921,14 +1134,8 @@ static int macb_alloc_consistent(struct macb *bp) "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); - size = RX_RING_SIZE * RX_BUFFER_SIZE; - bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, - &bp->rx_buffers_dma, GFP_KERNEL); - if (!bp->rx_buffers) + if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) goto out_err; - netdev_dbg(bp->dev, - "Allocated RX buffers 
of %d bytes at %08lx (mapped %p)\n", - size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); return 0; @@ -937,6 +1144,21 @@ out_err: return -ENOMEM; } +static void gem_init_rings(struct macb *bp) +{ + int i; + + for (i = 0; i < TX_RING_SIZE; i++) { + bp->tx_ring[i].addr = 0; + bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); + } + bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + + bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0; + + gem_rx_refill(bp); +} + static void macb_init_rings(struct macb *bp) { int i; @@ -946,7 +1168,7 @@ static void macb_init_rings(struct macb *bp) for (i = 0; i < RX_RING_SIZE; i++) { bp->rx_ring[i].addr = addr; bp->rx_ring[i].ctrl = 0; - addr += RX_BUFFER_SIZE; + addr += bp->rx_buffer_size; } bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); @@ -1056,7 +1278,7 @@ static void macb_configure_dma(struct macb *bp) if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); - dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); + dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); dmacfg |= GEM_BF(FBLDO, 16); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); dmacfg &= ~GEM_BIT(ENDIA); @@ -1070,7 +1292,7 @@ static void macb_configure_dma(struct macb *bp) static void macb_configure_caps(struct macb *bp) { if (macb_is_gem(bp)) { - if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0) + if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0) bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; } } @@ -1233,6 +1455,7 @@ EXPORT_SYMBOL_GPL(macb_set_rx_mode); static int macb_open(struct net_device *dev) { struct macb *bp = netdev_priv(dev); + size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; int err; netdev_dbg(bp->dev, "open\n"); @@ -1244,6 +1467,9 @@ static int macb_open(struct net_device *dev) if (!bp->phy_dev) return -EAGAIN; + /* RX buffers initialization */ + macb_init_rx_buffer_size(bp, bufsz); + err = macb_alloc_consistent(bp); if (err) { netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", @@ -1253,7 +1479,7 @@ static int macb_open(struct net_device *dev) napi_enable(&bp->napi); - macb_init_rings(bp); + bp->macbgem_ops.mog_init_rings(bp); macb_init_hw(bp); /* schedule a link state check */ @@ -1572,6 +1798,19 @@ static int __init macb_probe(struct platform_device *pdev) dev->base_addr = regs->start; + /* setup appropriated routines according to adapter type */ + if (macb_is_gem(bp)) { + bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = gem_init_rings; + bp->macbgem_ops.mog_rx = gem_rx; + } else { + bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = macb_init_rings; + bp->macbgem_ops.mog_rx = macb_rx; + } + /* Set MII management clock divider */ config = macb_mdc_clk_div(bp); config |= macb_dbw(bp); @@ -1649,7 +1888,6 @@ err_out_put_pclk: err_out_free_dev: free_netdev(dev); err_out: - platform_set_drvdata(pdev, NULL); return err; } @@ -1675,7 +1913,6 @@ static int __exit macb_remove(struct platform_device *pdev) clk_disable_unprepare(bp->pclk); clk_put(bp->pclk); free_netdev(dev); - platform_set_drvdata(pdev, NULL); } return 0; diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 548c0ecae869..f4076155bed7 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -545,12 +545,24 @@ struct gem_stats { u32 rx_udp_checksum_errors; }; 
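The macb hunks above stop hard-coding the MACB copy-based RX path and instead select per-variant handlers once at probe time, which the struct macb_or_gem_ops introduced below formalizes. A minimal standalone sketch of this probe-time ops-table dispatch, with hypothetical names (userspace C, not the driver code itself):

/* Probe-time ops-table dispatch, as the macb driver adopts for its
 * MACB-vs-GEM RX paths.  Hypothetical standalone sketch, not kernel code. */
#include <stdio.h>

struct nic;                          /* forward declaration, like "struct macb;" */

struct nic_ops {                     /* analogous to struct macb_or_gem_ops */
        int (*rx)(struct nic *n, int budget);
};

struct nic {
        int is_gem;                  /* stands in for macb_is_gem() */
        struct nic_ops ops;
};

static int copy_rx(struct nic *n, int budget)
{
        (void)n;
        printf("copy-based rx, budget %d\n", budget);
        return 0;
}

static int zerocopy_rx(struct nic *n, int budget)
{
        (void)n;
        printf("descriptor/skb rx, budget %d\n", budget);
        return 0;
}

static void nic_probe(struct nic *n)
{
        /* Decide once here; hot paths such as the NAPI poll loop then call
         * n->ops.rx() without re-testing the hardware variant. */
        n->ops.rx = n->is_gem ? zerocopy_rx : copy_rx;
}

int main(void)
{
        struct nic n = { .is_gem = 1 };
        nic_probe(&n);
        return n.ops.rx(&n, 64);
}

This mirrors how macb_poll() now calls bp->macbgem_ops.mog_rx(bp, budget) instead of macb_rx() directly.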
+struct macb; + +struct macb_or_gem_ops { + int (*mog_alloc_rx_buffers)(struct macb *bp); + void (*mog_free_rx_buffers)(struct macb *bp); + void (*mog_init_rings)(struct macb *bp); + int (*mog_rx)(struct macb *bp, int budget); +}; + struct macb { void __iomem *regs; unsigned int rx_tail; + unsigned int rx_prepared_head; struct macb_dma_desc *rx_ring; + struct sk_buff **rx_skbuff; void *rx_buffers; + size_t rx_buffer_size; unsigned int tx_head, tx_tail; struct macb_dma_desc *tx_ring; @@ -573,6 +585,8 @@ struct macb { dma_addr_t tx_ring_dma; dma_addr_t rx_buffers_dma; + struct macb_or_gem_ops macbgem_ops; + struct mii_bus *mii_bus; struct phy_device *phy_dev; unsigned int link; diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 4a1f2fa812ab..7cb148c495c9 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1790,7 +1790,6 @@ err_io: free_netdev(ndev); err_alloc: release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(pdev, NULL); return ret; } @@ -1813,7 +1812,6 @@ static int xgmac_remove(struct platform_device *pdev) free_irq(ndev->irq, ndev); free_irq(priv->pmt_irq, ndev); - platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); netif_napi_del(&priv->napi); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 9624cfe7df57..d7048db9863d 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -1351,22 +1351,11 @@ static void remove_one(struct pci_dev *pdev) t1_sw_reset(pdev); } -static struct pci_driver driver = { +static struct pci_driver cxgb_pci_driver = { .name = DRV_NAME, .id_table = t1_pci_tbl, .probe = init_one, .remove = remove_one, }; -static int __init t1_init_module(void) -{ - return pci_register_driver(&driver); -} - -static void __exit t1_cleanup_module(void) -{ - pci_unregister_driver(&driver); -} - -module_init(t1_init_module); -module_exit(t1_cleanup_module); +module_pci_driver(cxgb_pci_driver); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 71497e835f42..b650951791dd 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev) CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", t3_read_reg(adapter, A_PCIE_PEX_ERR)); + rtnl_lock(); t3_resume_ports(adapter); + rtnl_unlock(); } static const struct pci_error_handlers t3_err_handler = { diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 0c96e5fe99cc..4058b856eb71 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1246,6 +1246,7 @@ int cxgb3_offload_activate(struct adapter *adapter) struct tid_range stid_range, tid_range; struct mtutab mtutab; unsigned int l2t_capacity; + struct l2t_data *l2td; t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) @@ -1261,8 +1262,8 @@ int cxgb3_offload_activate(struct adapter *adapter) goto out_free; err = -ENOMEM; - RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); - if (!L2DATA(dev)) + l2td = t3_init_l2t(l2t_capacity); + if (!l2td) goto out_free; natids = min(tid_range.num / 2, MAX_ATIDS); @@ -1279,6 +1280,7 @@ int cxgb3_offload_activate(struct adapter *adapter) INIT_LIST_HEAD(&t->list_node); t->dev = dev; + RCU_INIT_POINTER(dev->l2opt, l2td); 
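The cxgb3_offload_activate() fix just above keeps the freshly allocated L2 table in a local (l2td) and only publishes it with RCU_INIT_POINTER() after the rest of the offload state is wired up, so an RCU reader can never observe a half-initialized dev->l2opt. A standalone C11 sketch of this initialize-fully-then-publish rule, using a release/acquire pair as a stand-in for the kernel's RCU publish/subscribe (hypothetical names):

#include <stdatomic.h>
#include <stdlib.h>

struct table {
        int  capacity;
        int *slots;
};

/* Shared pointer: stays NULL until a fully built table is published. */
static _Atomic(struct table *) shared_tbl;

static int publish_table(int capacity)
{
        struct table *t = malloc(sizeof(*t));

        if (!t)
                return -1;
        t->capacity = capacity;              /* complete ALL init first */
        t->slots = calloc(capacity, sizeof(int));
        if (!t->slots) {
                free(t);                     /* error path frees the local;
                                              * nothing was ever published */
                return -1;
        }
        /* Publish last: the release store orders the initialization above
         * before the pointer can be observed as non-NULL. */
        atomic_store_explicit(&shared_tbl, t, memory_order_release);
        return 0;
}

static int reader(void)
{
        struct table *t = atomic_load_explicit(&shared_tbl,
                                               memory_order_acquire);
        return t ? t->capacity : 0;          /* never a half-built table */
}

int main(void)
{
        if (publish_table(16))
                return 1;
        return reader() == 16 ? 0 : 1;
}

The patch's error path follows the same shape: on failure it frees the local l2td and never has to retract a pointer readers might already hold.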
T3C_DATA(dev) = t; dev->recv = process_rx; dev->neigh_update = t3_l2t_update; @@ -1294,8 +1296,7 @@ int cxgb3_offload_activate(struct adapter *adapter) return 0; out_free_l2t: - t3_free_l2t(L2DATA(dev)); - RCU_INIT_POINTER(dev->l2opt, NULL); + t3_free_l2t(l2td); out_free: kfree(t); return err; diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index f12e6b85a653..687ec4a8bb48 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { + __free_pages(q->pg_chunk.page, order); + q->pg_chunk.page = NULL; + return -EIO; + } q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -949,40 +954,75 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) return flits_to_desc(flits); } + +/* map_skb - map a packet main body and its page fragments + * @pdev: the PCI device + * @skb: the packet + * @addr: placeholder to save the mapped addresses + * + * map the main body of an sk_buff and its page fragments, if any. + */ +static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), + DMA_TO_DEVICE); + if (pci_dma_mapping_error(pdev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), + DMA_TO_DEVICE); + + pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); +out_err: + return -ENOMEM; +} + /** - * make_sgl - populate a scatter/gather list for a packet + * write_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL - * @pdev: the PCI device + * @addr: the list of the mapped addresses * - * Generates a scatter/gather list for the buffers that make up a packet + * Copies the scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately. 
*/ -static inline unsigned int make_sgl(const struct sk_buff *skb, +static inline unsigned int write_sgl(const struct sk_buff *skb, struct sg_ent *sgp, unsigned char *start, - unsigned int len, struct pci_dev *pdev) + unsigned int len, const dma_addr_t *addr) { - dma_addr_t mapping; - unsigned int i, j = 0, nfrags; + unsigned int i, j = 0, k = 0, nfrags; if (len) { - mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); sgp->len[0] = cpu_to_be32(len); - sgp->addr[0] = cpu_to_be64(mapping); - j = 1; + sgp->addr[j++] = cpu_to_be64(addr[k++]); } nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), - DMA_TO_DEVICE); sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); - sgp->addr[j] = cpu_to_be64(mapping); + sgp->addr[j] = cpu_to_be64(addr[k++]); j ^= 1; if (j == 0) ++sgp; @@ -1138,7 +1178,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, - unsigned int compl) + unsigned int compl, const dma_addr_t *addr) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; @@ -1196,7 +1236,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, } sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); + sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), @@ -1227,6 +1267,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* * The chip min packet length is 9 octets but play safe and reject @@ -1255,6 +1296,11 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } + if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); @@ -1312,7 +1358,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!skb_shared(skb))) skb_orphan(skb); - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } @@ -1537,10 +1583,9 @@ static void deferred_unmap_destructor(struct sk_buff *skb) dui = (struct deferred_unmap_info *)skb->head; p = dui->addr; - if (skb->tail - skb->transport_header) - pci_unmap_single(dui->pdev, *p++, - skb->tail - skb->transport_header, - PCI_DMA_TODEVICE); + if (skb_tail_pointer(skb) - skb_transport_header(skb)) + pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - + skb_transport_header(skb), PCI_DMA_TODEVICE); si = skb_shinfo(skb); for (i = 0; i < si->nr_frags; i++) @@ -1578,7 +1623,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, */ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, struct sge_txq *q, unsigned int pidx, - unsigned int gen, unsigned int ndesc) + unsigned int gen, unsigned int ndesc, + const dma_addr_t *addr) { unsigned int sgl_flits, flits; struct work_request_hdr *from; @@ -1599,9 +1645,9 @@ static void write_ofld_wr(struct adapter 
*adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), - skb->tail - skb->transport_header, - adap->pdev); + sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), + skb_tail_pointer(skb) - + skb_transport_header(skb), addr); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; @@ -1627,7 +1673,7 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) flits = skb_transport_offset(skb) / 8; /* headers */ cnt = skb_shinfo(skb)->nr_frags; - if (skb->tail != skb->transport_header) + if (skb_tail_pointer(skb) != skb_transport_header(skb)) cnt++; return flits_to_desc(flits + sgl_len(cnt)); } @@ -1659,6 +1705,11 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); goto again; } + if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { + spin_unlock(&q->lock); + return NET_XMIT_SUCCESS; + } + gen = q->gen; q->in_use += ndesc; pidx = q->pidx; @@ -1669,7 +1720,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); } spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); check_ring_tx_db(adap, q); return NET_XMIT_SUCCESS; } @@ -1687,6 +1738,7 @@ static void restart_offloadq(unsigned long data) struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; + unsigned int written = 0; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); @@ -1706,10 +1758,14 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); break; } + if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) + break; + gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; + written += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; @@ -1717,7 +1773,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc, + (dma_addr_t *)skb->head); spin_lock(&q->lock); } spin_unlock(&q->lock); @@ -1727,8 +1784,9 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif wmb(); - t3_write_reg(adap, A_SG_KDOORBELL, - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + if (likely(written)) + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 681804b30a3f..2aafb809e067 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -51,7 +51,7 @@ #include "t4_hw.h" #define FW_VERSION_MAJOR 1 -#define FW_VERSION_MINOR 1 +#define FW_VERSION_MINOR 4 #define FW_VERSION_MICRO 0 #define FW_VERSION_MAJOR_T5 0 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3cd397d60434..5a3256b083f2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4842,8 +4842,17 @@ static int adap_init0(struct adapter *adap) * is excessively mismatched relative to the driver.) */ ret = t4_check_fw_version(adap); + + /* The error code -EFAULT is returned by t4_check_fw_version() if + * firmware on adapter < supported firmware. 
If firmware on adapter + * is too old (not supported by driver) and we're the MASTER_PF set + * adapter state to DEV_STATE_UNINIT to force firmware upgrade + * and reinitialization. + */ + if ((adap->flags & MASTER_PF) && ret == -EFAULT) + state = DEV_STATE_UNINIT; if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { - if (ret == -EINVAL || ret > 0) { + if (ret == -EINVAL || ret == -EFAULT || ret > 0) { if (upgrade_fw(adap) >= 0) { /* * Note that the chip was reset as part of the @@ -4852,7 +4861,21 @@ static int adap_init0(struct adapter *adap) */ reset = 0; ret = t4_check_fw_version(adap); - } + } else + if (ret == -EFAULT) { + /* + * Firmware is old but still might + * work if we force reinitialization + * of the adapter. Ignoring FW upgrade + * failure. + */ + dev_warn(adap->pdev_dev, + "Ignoring firmware upgrade " + "failure, and forcing driver " + "to reinitialize the " + "adapter.\n"); + ret = 0; + } } if (ret < 0) return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 2bfbb206b35a..ac311f5f3eb9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1294,7 +1294,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) flits = skb_transport_offset(skb) / 8U; /* headers */ cnt = skb_shinfo(skb)->nr_frags; - if (skb->tail != skb->transport_header) + if (skb_tail_pointer(skb) != skb_transport_header(skb)) cnt++; return flits + sgl_len(cnt); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index d02d4e8c4417..4cbb2f9850be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -938,6 +938,15 @@ int t4_check_fw_version(struct adapter *adapter) memcpy(adapter->params.api_vers, api_vers, sizeof(adapter->params.api_vers)); + if (major < exp_major || (major == exp_major && minor < exp_minor) || + (major == exp_major && minor == exp_minor && micro < exp_micro)) { + dev_err(adapter->pdev_dev, + "Card has firmware version %u.%u.%u, minimum " + "supported firmware is %u.%u.%u.\n", major, minor, + micro, exp_major, exp_minor, exp_micro); + return -EFAULT; + } + if (major != exp_major) { /* major mismatch - fail */ dev_err(adapter->pdev_dev, "card FW has major version %u, driver wants %u\n", @@ -3773,7 +3782,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) p->lport = j; p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); - adap->port[i]->dev_id = j; ret = ntohl(c.u.info.lstatus_to_modtype); p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 8388e36cf08f..7403dff8f14a 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -44,7 +44,6 @@ config CS89x0_PLATFORM config EP93XX_ETH tristate "EP93xx Ethernet support" depends on ARM && ARCH_EP93XX - select NET_CORE select MII help This is a driver for the ethernet hardware included in EP93xx CPUs. 
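The t4_check_fw_version() hunk above adds a minimum-version gate: the adapter's (major, minor, micro) firmware triple is compared lexicographically against the driver's expected triple, and -EFAULT is returned when it is older, which adap_init0() then uses to force an upgrade or, failing that, a reinitialization. A standalone sketch of that three-field comparison (hypothetical names):

#include <stdio.h>

/* Returns <0, 0 or >0 as version a is older than, equal to or newer than
 * version b -- the ordering the new firmware check in t4_hw.c relies on. */
static int fw_vers_cmp(unsigned a_maj, unsigned a_min, unsigned a_mic,
                       unsigned b_maj, unsigned b_min, unsigned b_mic)
{
        if (a_maj != b_maj)
                return a_maj < b_maj ? -1 : 1;
        if (a_min != b_min)
                return a_min < b_min ? -1 : 1;
        if (a_mic != b_mic)
                return a_mic < b_mic ? -1 : 1;
        return 0;
}

int main(void)
{
        /* 1.1.0 on the card vs the driver's new minimum 1.4.0 (the
         * FW_VERSION_MINOR bump above): too old, mapped to -EFAULT. */
        if (fw_vers_cmp(1, 1, 0, 1, 4, 0) < 0)
                printf("card firmware below minimum, upgrade/reinit\n");
        return 0;
}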
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 67b0388b6e68..e3d4ec836f8b 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -783,7 +783,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev) dev = platform_get_drvdata(pdev); if (dev == NULL) return 0; - platform_set_drvdata(pdev, NULL); ep = netdev_priv(dev); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 635f55992d7e..992ec2ee64d9 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1761,6 +1761,7 @@ static void enic_change_mtu_work(struct work_struct *work) enic_synchronize_irqs(enic); err = vnic_rq_disable(&enic->rq[0]); if (err) { + rtnl_unlock(); netdev_err(netdev, "Unable to disable RQ.\n"); return; } @@ -1773,6 +1774,7 @@ static void enic_change_mtu_work(struct work_struct *work) vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[0]) == 0) { + rtnl_unlock(); netdev_err(netdev, "Unable to alloc receive buffers.\n"); return; } diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig index 9745fe5e8039..316c5e5a92ad 100644 --- a/drivers/net/ethernet/davicom/Kconfig +++ b/drivers/net/ethernet/davicom/Kconfig @@ -6,7 +6,6 @@ config DM9000 tristate "DM9000 support" depends on ARM || BLACKFIN || MIPS || COLDFIRE select CRC32 - select NET_CORE select MII ---help--- Support for DM9000 chipset. diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9105465b2a1a..a13b312b50f2 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -29,6 +29,8 @@ #include <linux/spinlock.h> #include <linux/crc32.h> #include <linux/mii.h> +#include <linux/of.h> +#include <linux/of_net.h> #include <linux/ethtool.h> #include <linux/dm9000.h> #include <linux/delay.h> @@ -827,7 +829,7 @@ dm9000_hash_table_unlocked(struct net_device *dev) struct netdev_hw_addr *ha; int i, oft; u32 hash_val; - u16 hash_table[4]; + u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; dm9000_dbg(db, 1, "entering %s\n", __func__); @@ -835,13 +837,6 @@ dm9000_hash_table_unlocked(struct net_device *dev) for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) iow(db, oft, dev->dev_addr[i]); - /* Clear Hash Table */ - for (i = 0; i < 4; i++) - hash_table[i] = 0x0; - - /* broadcast address */ - hash_table[3] = 0x8000; - if (dev->flags & IFF_PROMISC) rcr |= RCR_PRMSC; @@ -1358,6 +1353,31 @@ static const struct net_device_ops dm9000_netdev_ops = { #endif }; +static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) +{ + struct dm9000_plat_data *pdata; + struct device_node *np = dev->of_node; + const void *mac_addr; + + if (!IS_ENABLED(CONFIG_OF) || !np) + return NULL; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + if (of_find_property(np, "davicom,ext-phy", NULL)) + pdata->flags |= DM9000_PLATF_EXT_PHY; + if (of_find_property(np, "davicom,no-eeprom", NULL)) + pdata->flags |= DM9000_PLATF_NO_EEPROM; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr)); + + return pdata; +} + /* * Search DM9000 board, allocate space and register it */ @@ -1373,6 +1393,12 @@ dm9000_probe(struct 
platform_device *pdev) int i; u32 id_val; + if (!pdata) { + pdata = dm9000_parse_dt(&pdev->dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); if (!ndev) @@ -1673,8 +1699,6 @@ dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); dm9000_release_board(pdev, netdev_priv(ndev)); free_netdev(ndev); /* free device structure */ @@ -1683,11 +1707,20 @@ dm9000_drv_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id dm9000_of_matches[] = { + { .compatible = "davicom,dm9000", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, dm9000_of_matches); +#endif + static struct platform_driver dm9000_driver = { .driver = { .name = "dm9000", .owner = THIS_MODULE, .pm = &dm9000_drv_pm_ops, + .of_match_table = of_match_ptr(dm9000_of_matches), }, .probe = dm9000_probe, .remove = dm9000_drv_remove, diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 1df33c799c00..eb9ba6e97d04 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -126,7 +126,6 @@ config WINBOND_840 tristate "Winbond W89c840 Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver is for the Winbond W89c840 chip. It also works with diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 1e9443d9fb57..c94152f1c6be 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1410,12 +1410,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return i; } - /* The chip will fail to enter a low-power state later unless - * first explicitly commanded into D0 */ - if (pci_set_power_state(pdev, PCI_D0)) { - pr_notice("Failed to set power state to D0\n"); - } - irq = pdev->irq; /* alloc_etherdev ensures aligned and zeroed private structures */ diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index cdbcd1643141..9b84cb04fe5f 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -1171,16 +1171,4 @@ investigate_write_descriptor(struct net_device *dev, } } -static int __init xircom_init(void) -{ - return pci_register_driver(&xircom_ops); -} - -static void __exit xircom_exit(void) -{ - pci_unregister_driver(&xircom_ops); -} - -module_init(xircom_init) -module_exit(xircom_exit) - +module_pci_driver(xircom_ops); diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig index ee26ce78e270..c543ac11ce08 100644 --- a/drivers/net/ethernet/dlink/Kconfig +++ b/drivers/net/ethernet/dlink/Kconfig @@ -36,7 +36,6 @@ config SUNDANCE tristate "Sundance Alta support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver is for the Sundance "Alta" chip. 
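dm9000_probe() above gains a device-tree fallback: when no platform data is supplied, it is synthesized by dm9000_parse_dt(), which returns NULL when there is simply nothing to parse but ERR_PTR(-ENOMEM) on a real failure, and only the latter aborts the probe. A standalone sketch of that three-way return convention, with simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers (hypothetical names, userspace C):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct config { int flags; };

/* Simplified stand-ins for the kernel's err.h helpers. */
#define ERR_PTR(err)   ((void *)(intptr_t)(err))
#define IS_ERR(p)      ((uintptr_t)(p) >= (uintptr_t)-4095)
#define PTR_ERR(p)     ((intptr_t)(p))

static struct config *parse_alt_source(int have_source)
{
        struct config *c;

        if (!have_source)
                return NULL;               /* nothing to parse: not an error */
        c = malloc(sizeof(*c));
        if (!c)
                return ERR_PTR(-12);       /* genuine failure (-ENOMEM) */
        c->flags = 1;
        return c;
}

int main(void)
{
        struct config *cfg = NULL;         /* as when no platform data is passed */

        if (!cfg) {
                cfg = parse_alt_source(1);
                if (IS_ERR(cfg))
                        return (int)-PTR_ERR(cfg);  /* abort only on real errors */
        }
        /* cfg may legitimately still be NULL: continue with defaults,
         * exactly as dm9000_probe() does. */
        printf("flags=%d\n", cfg ? cfg->flags : 0);
        free(cfg);
        return 0;
}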
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 0a510684e468..c827b1b6b1ce 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -333,6 +333,9 @@ enum vf_state { #define BE_VF_UC_PMAC_COUNT 2 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) +/* Ethtool set_dump flags */ +#define LANCER_INITIATE_FW_DUMP 0x1 + struct phy_info { u8 transceiver; u8 autoneg; @@ -398,6 +401,7 @@ struct be_adapter { u32 cmd_privileges; /* Ethtool knobs and info */ char fw_ver[FW_VER_LEN]; + char fw_on_flash[FW_VER_LEN]; int if_handle; /* Used to configure filtering */ u32 *pmac_id; /* MAC addr handle used by BE card */ u32 beacon_state; /* for set_phys_id */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1db2df61b8af..6e6e0a117ee2 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -3255,6 +3255,72 @@ err: return status; } +static int lancer_wait_idle(struct be_adapter *adapter) +{ +#define SLIPORT_IDLE_TIMEOUT 30 + u32 reg_val; + int status = 0, i; + + for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { + reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); + if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) + break; + + ssleep(1); + } + + if (i == SLIPORT_IDLE_TIMEOUT) + status = -1; + + return status; +} + +int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) +{ + int status = 0; + + status = lancer_wait_idle(adapter); + if (status) + return status; + + iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); + + return status; +} + +/* Routine to check whether dump image is present or not */ +bool dump_present(struct be_adapter *adapter) +{ + u32 sliport_status = 0; + + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); +} + +int lancer_initiate_dump(struct be_adapter *adapter) +{ + int status; + + /* give firmware reset and diagnostic dump */ + status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | + PHYSDEV_CONTROL_DD_MASK); + if (status < 0) { + dev_err(&adapter->pdev->dev, "Firmware reset failed\n"); + return status; + } + + status = lancer_wait_idle(adapter); + if (status) + return status; + + if (!dump_present(adapter)) { + dev_err(&adapter->pdev->dev, "Dump image not present\n"); + return -1; + } + + return 0; +} + /* Uses sync mcc */ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 025bdb0d1764..5228d88c5a02 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1937,6 +1937,9 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, struct be_dma_mem *cmd, struct be_fat_conf_params *cfgs); extern int lancer_wait_ready(struct be_adapter *adapter); +extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); +extern int lancer_initiate_dump(struct be_adapter *adapter); +extern bool dump_present(struct be_adapter *adapter); extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); extern int be_cmd_get_func_config(struct be_adapter *adapter); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 3d4461adb3b4..4f8c941217cc 100644 --- 
a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -177,19 +177,15 @@ static void be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct be_adapter *adapter = netdev_priv(netdev); - char fw_on_flash[FW_VER_LEN]; - - memset(fw_on_flash, 0 , sizeof(fw_on_flash)); - be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash); strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); - if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN)) + if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN)) strlcpy(drvinfo->fw_version, adapter->fw_ver, sizeof(drvinfo->fw_version)); else snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%s [%s]", adapter->fw_ver, fw_on_flash); + "%s [%s]", adapter->fw_ver, adapter->fw_on_flash); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); @@ -673,6 +669,34 @@ be_set_phys_id(struct net_device *netdev, return 0; } +static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + int status; + + if (!lancer_chip(adapter)) { + dev_err(dev, "FW dump not supported\n"); + return -EOPNOTSUPP; + } + + if (dump_present(adapter)) { + dev_err(dev, "Previous dump not cleared, not forcing dump\n"); + return 0; + } + + switch (dump->flag) { + case LANCER_INITIATE_FW_DUMP: + status = lancer_initiate_dump(adapter); + if (!status) + dev_info(dev, "F/w dump initiated successfully\n"); + break; + default: + dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag); + return -EINVAL; + } + return status; +} static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -1110,6 +1134,7 @@ const struct ethtool_ops be_ethtool_ops = { .set_pauseparam = be_set_pauseparam, .get_strings = be_get_stat_strings, .set_phys_id = be_set_phys_id, + .set_dump = be_set_dump, .get_msglevel = be_get_msg_level, .set_msglevel = be_set_msg_level, .get_sset_count = be_get_sset_count, diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 8780183c6d1c..3e2162121601 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -53,10 +53,12 @@ #define PHYSDEV_CONTROL_OFFSET 0x414 #define SLIPORT_STATUS_ERR_MASK 0x80000000 +#define SLIPORT_STATUS_DIP_MASK 0x02000000 #define SLIPORT_STATUS_RN_MASK 0x01000000 #define SLIPORT_STATUS_RDY_MASK 0x00800000 #define SLI_PORT_CONTROL_IP_MASK 0x08000000 #define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002 +#define PHYSDEV_CONTROL_DD_MASK 0x00000004 #define PHYSDEV_CONTROL_INP_MASK 0x40000000 #define SLIPORT_ERROR_NO_RESOURCE1 0x2 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a0b4be51f0d1..181edb522450 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -782,16 +782,22 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (vlan_tx_tag_present(skb)) vlan_tag = be_get_tx_vlan_tag(adapter, skb); - else if (qnq_async_evt_rcvd(adapter) && adapter->pvid) - vlan_tag = adapter->pvid; + + if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { + if (!vlan_tag) + vlan_tag = adapter->pvid; + /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to + * skip VLAN insertion + */ + if (skip_hw_vlan) + *skip_hw_vlan = true; + } if 
(vlan_tag) { skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); if (unlikely(!skb)) return skb; skb->vlan_tci = 0; - if (skip_hw_vlan) - *skip_hw_vlan = true; } /* Insert the outer VLAN, if any */ @@ -834,32 +840,39 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb) return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; } -static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) +static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, + struct sk_buff *skb) { - return BE3_chip(adapter) && - be_ipv6_exthdr_check(skb); + return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); } -static netdev_tx_t be_xmit(struct sk_buff *skb, - struct net_device *netdev) +static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + bool *skip_hw_vlan) { - struct be_adapter *adapter = netdev_priv(netdev); - struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; - struct be_queue_info *txq = &txo->q; - struct iphdr *ip = NULL; - u32 wrb_cnt = 0, copied = 0; - u32 start = txq->head, eth_hdr_len; - bool dummy_wrb, stopped = false; - bool skip_hw_vlan = false; struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + unsigned int eth_hdr_len; + struct iphdr *ip; - eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? - VLAN_ETH_HLEN : ETH_HLEN; + /* Lancer ASIC has a bug wherein packets that are 32 bytes or less + * may cause a transmit stall on that port. So the work-around is to + * pad such packets to a 36-byte length. + */ + if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { + if (skb_padto(skb, 36)) + goto tx_drop; + skb->len = 36; + } /* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. + * For padded packets, Lancer computes incorrect checksum. */ - if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) { + eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? + VLAN_ETH_HLEN : ETH_HLEN; + if (skb->len <= 60 && + (lancer_chip(adapter) || vlan_tx_tag_present(skb)) && + is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); } @@ -869,15 +882,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, */ if ((adapter->function_mode & UMC_ENABLED) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - skip_hw_vlan = true; + *skip_hw_vlan = true; /* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. * Manually insert VLAN in pkt. */ if (skb->ip_summed != CHECKSUM_PARTIAL && - vlan_tx_tag_present(skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + vlan_tx_tag_present(skb)) { + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; } @@ -887,8 +900,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, * skip HW tagging is not enabled by FW. 
*/ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && - (adapter->pvid || adapter->qnq_vid) && - !qnq_async_evt_rcvd(adapter))) + (adapter->pvid || adapter->qnq_vid) && + !qnq_async_evt_rcvd(adapter))) goto tx_drop; /* Manual VLAN tag insertion to prevent: @@ -899,11 +912,31 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; } + return skb; +tx_drop: + dev_kfree_skb_any(skb); + return NULL; +} + +static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; + struct be_queue_info *txq = &txo->q; + bool dummy_wrb, stopped = false; + u32 wrb_cnt = 0, copied = 0; + bool skip_hw_vlan = false; + u32 start = txq->head; + + skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); + if (!skb) + return NETDEV_TX_OK; + wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, @@ -933,7 +966,6 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, txq->head = start; dev_kfree_skb_any(skb); } -tx_drop: return NETDEV_TX_OK; } @@ -1236,30 +1268,6 @@ static int be_set_vf_tx_rate(struct net_device *netdev, return status; } -static int be_find_vfs(struct be_adapter *adapter, int vf_state) -{ - struct pci_dev *dev, *pdev = adapter->pdev; - int vfs = 0, assigned_vfs = 0, pos; - u16 offset, stride; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return 0; - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride); - - dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL); - while (dev) { - if (dev->is_virtfn && pci_physfn(dev) == pdev) { - vfs++; - if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) - assigned_vfs++; - } - dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev); - } - return (vf_state == ASSIGNED) ? 
assigned_vfs : vfs; -} - static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) { struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]); @@ -2771,7 +2779,7 @@ static void be_vf_clear(struct be_adapter *adapter) struct be_vf_cfg *vf_cfg; u32 vf; - if (be_find_vfs(adapter, ASSIGNED)) { + if (pci_vfs_assigned(adapter->pdev)) { dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs: not disabling VFs\n"); goto done; @@ -2873,7 +2881,7 @@ static int be_vf_setup(struct be_adapter *adapter) int status, old_vfs, vf; struct device *dev = &adapter->pdev->dev; - old_vfs = be_find_vfs(adapter, ENABLED); + old_vfs = pci_num_vf(adapter->pdev); if (old_vfs) { dev_info(dev, "%d VFs are already enabled\n", old_vfs); if (old_vfs != num_vfs) @@ -3184,7 +3192,7 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); + be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash); if (adapter->vlans_added) be_vid_config(adapter); @@ -3530,40 +3538,6 @@ static int be_flash_skyhawk(struct be_adapter *adapter, return 0; } -static int lancer_wait_idle(struct be_adapter *adapter) -{ -#define SLIPORT_IDLE_TIMEOUT 30 - u32 reg_val; - int status = 0, i; - - for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { - reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); - if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) - break; - - ssleep(1); - } - - if (i == SLIPORT_IDLE_TIMEOUT) - status = -1; - - return status; -} - -static int lancer_fw_reset(struct be_adapter *adapter) -{ - int status = 0; - - status = lancer_wait_idle(adapter); - if (status) - return status; - - iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db + - PHYSDEV_CONTROL_OFFSET); - - return status; -} - static int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw) { @@ -3641,7 +3615,8 @@ static int lancer_fw_download(struct be_adapter *adapter, } if (change_status == LANCER_FW_RESET_NEEDED) { - status = lancer_fw_reset(adapter); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { dev_err(&adapter->pdev->dev, "Adapter busy for FW reset.\n" @@ -3776,6 +3751,10 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file) else status = be_fw_download(adapter, fw); + if (!status) + be_cmd_get_fw_ver(adapter, adapter->fw_ver, + adapter->fw_on_flash); + fw_exit: release_firmware(fw); return status; @@ -4203,9 +4182,10 @@ reschedule: schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } +/* If any VFs are already enabled don't FLR the PF */ static bool be_reset_required(struct be_adapter *adapter) { - return be_find_vfs(adapter, ENABLED) > 0 ? false : true; + return pci_num_vf(adapter->pdev) ? 
false : true; } static char *mc_name(struct be_adapter *adapter) @@ -4390,7 +4370,7 @@ static int be_resume(struct pci_dev *pdev) if (status) return status; - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* tell fw we're ready to fire cmds */ @@ -4486,7 +4466,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* Check if card is ok and fw is ready */ diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 5722bc61fa58..cf579fb39bc5 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1147,8 +1147,6 @@ static int ethoc_remove(struct platform_device *pdev) struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); - platform_set_drvdata(pdev, NULL); - if (netdev) { netif_napi_del(&priv->napi); phy_disconnect(priv->phy); diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index b8974b9e3b47..5918c6891694 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -21,7 +21,6 @@ if NET_VENDOR_FARADAY config FTMAC100 tristate "Faraday FTMAC100 10/100 Ethernet support" depends on ARM - select NET_CORE select MII ---help--- This driver supports the FTMAC100 10/100 Ethernet controller diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 21b85fb7d05f..934e1ae279f0 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1311,7 +1311,6 @@ err_ioremap: release_resource(priv->res); err_req_mem: netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); err_alloc_etherdev: return err; @@ -1335,7 +1334,6 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) release_resource(priv->res); netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); return 0; } diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index a6eda8d83138..4658f4cc1969 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1149,7 +1149,6 @@ err_ioremap: release_resource(priv->res); err_req_mem: netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); err_alloc_etherdev: return err; @@ -1169,7 +1168,6 @@ static int __exit ftmac100_remove(struct platform_device *pdev) release_resource(priv->res); netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); return 0; } diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 9ce5b7185fda..2b0a0ea4f8e7 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -60,6 +60,61 @@ #define BM_MIIGSK_CFGR_RMII 0x01 #define BM_MIIGSK_CFGR_FRCONT_10M 0x40 +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */ +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ +#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */ +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ +#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 
bytes, bad CRC */ +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ +#define RMON_T_COL 0x224 /* RMON TX collision count */ +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ +#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ +#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ +#define RMON_T_OCTETS 0x244 /* RMON TX octets */ +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ +#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */ +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ +#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */ +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ +#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */ +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */ +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ +#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */ +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ +#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define RMON_R_RESVD_O 0x2A4 /* Reserved */ +#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */ +#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */ + #else #define FEC_ECNTRL 0x000 /* Ethernet control reg */ @@ -148,6 +203,9 @@ struct bufdesc_ex { #define BD_ENET_RX_CL ((ushort)0x0001) #define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ +/* Enhanced buffer descriptor control/status used by Ethernet receive */ +#define BD_ENET_RX_VLAN 0x00000004 + /* Buffer descriptor control/status used by Ethernet transmit. 
*/ #define BD_ENET_TX_READY ((ushort)0x8000) @@ -272,9 +330,10 @@ struct fec_enet_private { int hwts_tx_en; struct timer_list time_keep; struct fec_enet_delayed_work delay_work; + struct regulator *reg_phy; }; -void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); +void fec_ptp_init(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d48099f03b7f..d3ad5ea711d3 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -53,13 +53,15 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/of_net.h> -#include <linux/pinctrl/consumer.h> #include <linux/regulator/consumer.h> +#include <linux/if_vlan.h> #include <asm/cacheflush.h> #include "fec.h" +static void set_multicast_list(struct net_device *ndev); + #if defined(CONFIG_ARM) #define FEC_ALIGNMENT 0xf #else @@ -89,6 +91,8 @@ #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) /* Controller has hardware checksum support */ #define FEC_QUIRK_HAS_CSUM (1 << 5) +/* Controller has hardware vlan support */ +#define FEC_QUIRK_HAS_VLAN (1 << 6) static struct platform_device_id fec_devtype[] = { { @@ -107,7 +111,8 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN, }, { .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, @@ -178,11 +183,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) -/* The FEC stores dest/src/type, data, and checksum for receive packets. +/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. */ -#define PKT_MAXBUF_SIZE 1518 +#define PKT_MAXBUF_SIZE 1522 #define PKT_MINBUF_SIZE 64 -#define PKT_MAXBLR_SIZE 1520 +#define PKT_MAXBLR_SIZE 1536 /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) @@ -243,7 +248,7 @@ static void *swap_buffer(void *bufaddr, int len) int i; unsigned int *buf = bufaddr; - for (i = 0; i < (len + 3) / 4; i++, buf++) + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++) *buf = cpu_to_be32(*buf); return bufaddr; @@ -471,9 +476,8 @@ fec_restart(struct net_device *ndev, int duplex) /* Clear any outstanding interrupt. */ writel(0xffc00000, fep->hwp + FEC_IEVENT); - /* Reset all multicast. */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + /* Setup multicast filter. 
*/ + set_multicast_list(ndev); #ifndef CONFIG_M5272 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); writel(0, fep->hwp + FEC_HASH_TABLE_LOW); @@ -609,6 +613,11 @@ fec_restart(struct net_device *ndev, int duplex) if (fep->bufdesc_ex) ecntl |= (1 << 4); +#ifndef CONFIG_M5272 + /* Enable the MIB statistic event counters */ + writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); +#endif + /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); writel(0, fep->hwp + FEC_R_DES_ACTIVE); @@ -735,6 +744,7 @@ fec_enet_tx(struct net_device *ndev) ndev->stats.tx_carrier_errors++; } else { ndev->stats.tx_packets++; + ndev->stats.tx_bytes += bdp->cbd_datlen; } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && @@ -800,6 +810,9 @@ fec_enet_rx(struct net_device *ndev, int budget) ushort pkt_len; __u8 *data; int pkt_received = 0; + struct bufdesc_ex *ebdp = NULL; + bool vlan_packet_rcvd = false; + u16 vlan_tag; #ifdef CONFIG_M532x flush_cache_all(); @@ -863,6 +876,24 @@ fec_enet_rx(struct net_device *ndev, int budget) if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); + /* Extract the enhanced buffer descriptor */ + ebdp = NULL; + if (fep->bufdesc_ex) + ebdp = (struct bufdesc_ex *)bdp; + + /* If this is a VLAN packet remove the VLAN Tag */ + vlan_packet_rcvd = false; + if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + /* Push and remove the vlan tag */ + struct vlan_hdr *vlan_header = + (struct vlan_hdr *) (data + ETH_HLEN); + vlan_tag = ntohs(vlan_header->h_vlan_TCI); + pkt_len -= VLAN_HLEN; + + vlan_packet_rcvd = true; + } + /* This does 16 byte alignment, exactly what we need. * The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up @@ -873,9 +904,18 @@ fec_enet_rx(struct net_device *ndev, int budget) if (unlikely(!skb)) { ndev->stats.rx_dropped++; } else { + int payload_offset = (2 * ETH_ALEN); skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, pkt_len - 4); /* Make room */ - skb_copy_to_linear_data(skb, data, pkt_len - 4); + + /* Extract the frame data without the VLAN header. 
*/ + skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); + if (vlan_packet_rcvd) + payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; + skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), + data + payload_offset, + pkt_len - 4 - (2 * ETH_ALEN)); + skb->protocol = eth_type_trans(skb, ndev); /* Get receive timestamp from the skb */ @@ -883,8 +923,6 @@ fec_enet_rx(struct net_device *ndev, int budget) struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); unsigned long flags; - struct bufdesc_ex *ebdp = - (struct bufdesc_ex *)bdp; memset(shhwtstamps, 0, sizeof(*shhwtstamps)); @@ -895,9 +933,7 @@ fec_enet_rx(struct net_device *ndev, int budget) } if (fep->bufdesc_ex && - (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - struct bufdesc_ex *ebdp = - (struct bufdesc_ex *)bdp; + (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { /* don't check it */ skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -906,6 +942,12 @@ fec_enet_rx(struct net_device *ndev, int budget) } } + /* Handle received VLAN packets */ + if (vlan_packet_rcvd) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + if (!skb_defer_rx_timestamp(skb)) napi_gro_receive(&fep->napi, skb); } @@ -1444,8 +1486,117 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, return 0; } +static const struct fec_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} fec_stats[] = { + /* RMON TX */ + { "tx_dropped", RMON_T_DROP }, + { "tx_packets", RMON_T_PACKETS }, + { "tx_broadcast", RMON_T_BC_PKT }, + { "tx_multicast", RMON_T_MC_PKT }, + { "tx_crc_errors", RMON_T_CRC_ALIGN }, + { "tx_undersize", RMON_T_UNDERSIZE }, + { "tx_oversize", RMON_T_OVERSIZE }, + { "tx_fragment", RMON_T_FRAG }, + { "tx_jabber", RMON_T_JAB }, + { "tx_collision", RMON_T_COL }, + { "tx_64byte", RMON_T_P64 }, + { "tx_65to127byte", RMON_T_P65TO127 }, + { "tx_128to255byte", RMON_T_P128TO255 }, + { "tx_256to511byte", RMON_T_P256TO511 }, + { "tx_512to1023byte", RMON_T_P512TO1023 }, + { "tx_1024to2047byte", RMON_T_P1024TO2047 }, + { "tx_GTE2048byte", RMON_T_P_GTE2048 }, + { "tx_octets", RMON_T_OCTETS }, + + /* IEEE TX */ + { "IEEE_tx_drop", IEEE_T_DROP }, + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, + { "IEEE_tx_1col", IEEE_T_1COL }, + { "IEEE_tx_mcol", IEEE_T_MCOL }, + { "IEEE_tx_def", IEEE_T_DEF }, + { "IEEE_tx_lcol", IEEE_T_LCOL }, + { "IEEE_tx_excol", IEEE_T_EXCOL }, + { "IEEE_tx_macerr", IEEE_T_MACERR }, + { "IEEE_tx_cserr", IEEE_T_CSERR }, + { "IEEE_tx_sqe", IEEE_T_SQE }, + { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, + + /* RMON RX */ + { "rx_packets", RMON_R_PACKETS }, + { "rx_broadcast", RMON_R_BC_PKT }, + { "rx_multicast", RMON_R_MC_PKT }, + { "rx_crc_errors", RMON_R_CRC_ALIGN }, + { "rx_undersize", RMON_R_UNDERSIZE }, + { "rx_oversize", RMON_R_OVERSIZE }, + { "rx_fragment", RMON_R_FRAG }, + { "rx_jabber", RMON_R_JAB }, + { "rx_64byte", RMON_R_P64 }, + { "rx_65to127byte", RMON_R_P65TO127 }, + { "rx_128to255byte", RMON_R_P128TO255 }, + { "rx_256to511byte", RMON_R_P256TO511 }, + { "rx_512to1023byte", RMON_R_P512TO1023 }, + { "rx_1024to2047byte", RMON_R_P1024TO2047 }, + { "rx_GTE2048byte", RMON_R_P_GTE2048 }, + { "rx_octets", RMON_R_OCTETS }, + + /* IEEE RX */ + { "IEEE_rx_drop", IEEE_R_DROP }, + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, + { "IEEE_rx_crc", IEEE_R_CRC }, + { "IEEE_rx_align", IEEE_R_ALIGN }, + { "IEEE_rx_macerr", IEEE_R_MACERR }, + { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, +}; + +static void fec_enet_get_ethtool_stats(struct net_device *dev, + 
struct ethtool_stats *stats, u64 *data) +{ + struct fec_enet_private *fep = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + data[i] = readl(fep->hwp + fec_stats[i].offset); +} + +static void fec_enet_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + fec_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int fec_enet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(fec_stats); + default: + return -EOPNOTSUPP; + } +} #endif /* !defined(CONFIG_M5272) */ +static int fec_enet_nway_reset(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + static const struct ethtool_ops fec_enet_ethtool_ops = { #if !defined(CONFIG_M5272) .get_pauseparam = fec_enet_get_pauseparam, @@ -1456,6 +1607,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_drvinfo = fec_enet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = fec_enet_get_ts_info, + .nway_reset = fec_enet_nway_reset, +#ifndef CONFIG_M5272 + .get_ethtool_stats = fec_enet_get_ethtool_stats, + .get_strings = fec_enet_get_strings, + .get_sset_count = fec_enet_get_sset_count, +#endif }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -1803,6 +1960,12 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); + if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) { + /* enable hw VLAN support */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + } + if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { /* enable hw accelerator */ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM @@ -1865,8 +2028,6 @@ fec_probe(struct platform_device *pdev) struct resource *r; const struct of_device_id *of_id; static int dev_id; - struct pinctrl *pinctrl; - struct regulator *reg_phy; of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) @@ -1893,17 +2054,17 @@ fec_probe(struct platform_device *pdev) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; #endif - fep->hwp = devm_request_and_ioremap(&pdev->dev, r); + fep->hwp = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(fep->hwp)) { + ret = PTR_ERR(fep->hwp); + goto failed_ioremap; + } + fep->pdev = pdev; fep->dev_id = dev_id++; fep->bufdesc_ex = 0; - if (!fep->hwp) { - ret = -ENOMEM; - goto failed_ioremap; - } - platform_set_drvdata(pdev, ndev); ret = of_get_phy_mode(pdev->dev.of_node); @@ -1917,12 +2078,6 @@ fec_probe(struct platform_device *pdev) fep->phy_interface = ret; } - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) { - ret = PTR_ERR(pinctrl); - goto failed_pin; - } - fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(fep->clk_ipg)) { ret = PTR_ERR(fep->clk_ipg); @@ -1953,20 +2108,22 @@ fec_probe(struct platform_device *pdev) clk_prepare_enable(fep->clk_enet_out); clk_prepare_enable(fep->clk_ptp); - reg_phy = devm_regulator_get(&pdev->dev, "phy"); - if (!IS_ERR(reg_phy)) { - ret = regulator_enable(reg_phy); + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); + if (!IS_ERR(fep->reg_phy)) { + ret = regulator_enable(fep->reg_phy); if (ret) { dev_err(&pdev->dev, "Failed to 
enable phy regulator: %d\n", ret); goto failed_regulator; } + } else { + fep->reg_phy = NULL; } fec_reset_phy(pdev); if (fep->bufdesc_ex) - fec_ptp_init(ndev, pdev); + fec_ptp_init(pdev); ret = fec_enet_init(ndev); if (ret) @@ -2010,19 +2167,20 @@ fec_probe(struct platform_device *pdev) failed_register: fec_enet_mii_remove(fep); failed_mii_init: -failed_init: +failed_irq: for (i = 0; i < FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); if (irq > 0) free_irq(irq, ndev); } -failed_irq: +failed_init: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ptp); -failed_pin: failed_clk: failed_ioremap: free_netdev(ndev); @@ -2041,21 +2199,21 @@ fec_drv_remove(struct platform_device *pdev) unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); + for (i = 0; i < FEC_IRQ_NUM; i++) { + int irq = platform_get_irq(pdev, i); + if (irq > 0) + free_irq(irq, ndev); + } + if (fep->reg_phy) + regulator_disable(fep->reg_phy); clk_disable_unprepare(fep->clk_ptp); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); - for (i = 0; i < FEC_IRQ_NUM; i++) { - int irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } free_netdev(ndev); - platform_set_drvdata(pdev, NULL); - return 0; } @@ -2074,6 +2232,9 @@ fec_suspend(struct device *dev) clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + return 0; } @@ -2082,6 +2243,13 @@ fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + if (fep->reg_phy) { + ret = regulator_enable(fep->reg_phy); + if (ret) + return ret; + } clk_prepare_enable(fep->clk_enet_out); clk_prepare_enable(fep->clk_ahb); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 9bc15e2365bb..9947765e90c5 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -981,7 +981,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) goto err_node; /* We're done ! 
*/ - dev_set_drvdata(&op->dev, ndev); + platform_set_drvdata(op, ndev); netdev_info(ndev, "%s MAC %pM\n", op->dev.of_node->full_name, ndev->dev_addr); @@ -1010,7 +1010,7 @@ mpc52xx_fec_remove(struct platform_device *op) struct net_device *ndev; struct mpc52xx_fec_priv *priv; - ndev = dev_get_drvdata(&op->dev); + ndev = platform_get_drvdata(op); priv = netdev_priv(ndev); unregister_netdev(ndev); @@ -1030,14 +1030,13 @@ mpc52xx_fec_remove(struct platform_device *op) free_netdev(ndev); - dev_set_drvdata(&op->dev, NULL); return 0; } #ifdef CONFIG_PM static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); if (netif_running(dev)) mpc52xx_fec_close(dev); @@ -1047,7 +1046,7 @@ static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state static int mpc52xx_fec_of_resume(struct platform_device *op) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); mpc52xx_fec_hw_init(dev); mpc52xx_fec_reset_stats(dev); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 25fc960cbf0e..5007e4f9fff9 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -347,8 +347,9 @@ static void fec_time_keep(unsigned long _data) * cyclecounter init routine and exits. */ -void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) +void fec_ptp_init(struct platform_device *pdev) { + struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); fep->ptp_caps.owner = THIS_MODULE; diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig index 268414d9f2cb..be92229f2c2a 100644 --- a/drivers/net/ethernet/freescale/fs_enet/Kconfig +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -1,7 +1,6 @@ config FS_ENET tristate "Freescale Ethernet Driver" depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) - select NET_CORE select MII select PHYLIB diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index edc120094c34..8de53a14a6f4 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1048,7 +1048,7 @@ static int fs_enet_probe(struct platform_device *ofdev) } SET_NETDEV_DEV(ndev, &ofdev->dev); - dev_set_drvdata(&ofdev->dev, ndev); + platform_set_drvdata(ofdev, ndev); fep = netdev_priv(ndev); fep->dev = &ofdev->dev; @@ -1106,7 +1106,6 @@ out_cleanup_data: fep->ops->cleanup_data(ndev); out_free_dev: free_netdev(ndev); - dev_set_drvdata(&ofdev->dev, NULL); out_put: of_node_put(fpi->phy_node); out_free_fpi: @@ -1116,7 +1115,7 @@ out_free_fpi: static int fs_enet_remove(struct platform_device *ofdev) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct fs_enet_private *fep = netdev_priv(ndev); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 2bafbd37c247..844ecfa84d17 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -179,7 +179,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) } new_bus->parent = &ofdev->dev; - 
dev_set_drvdata(&ofdev->dev, new_bus); + platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) @@ -188,7 +188,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) return 0; out_free_irqs: - dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); @@ -202,11 +201,10 @@ out: static int fs_enet_mdio_remove(struct platform_device *ofdev) { - struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct mii_bus *bus = platform_get_drvdata(ofdev); struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 18e8ef203736..2f1c46a12f05 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -180,7 +180,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) } new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); + platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) @@ -189,7 +189,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) return 0; out_free_irqs: - dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); @@ -204,11 +203,10 @@ out: static int fs_enet_mdio_remove(struct platform_device *ofdev) { - struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct mii_bus *bus = platform_get_drvdata(ofdev); struct fec_info *fec = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); iounmap(fec->fecp); kfree(fec); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2375a01715a0..8d2db7b808b7 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -128,6 +128,7 @@ static void gfar_set_multi(struct net_device *dev); static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); static void gfar_configure_serdes(struct net_device *dev); static int gfar_poll(struct napi_struct *napi, int budget); +static int gfar_poll_sq(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); #endif @@ -1000,7 +1001,7 @@ static int gfar_probe(struct platform_device *ofdev) spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); - dev_set_drvdata(&ofdev->dev, priv); + platform_set_drvdata(ofdev, priv); regs = priv->gfargrp[0].regs; gfar_detect_errata(priv); @@ -1038,9 +1039,13 @@ static int gfar_probe(struct platform_device *ofdev) dev->ethtool_ops = &gfar_ethtool_ops; /* Register for napi ...We are registering NAPI for each grp */ - for (i = 0; i < priv->num_grps; i++) - netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, + if (priv->mode == SQ_SG_MODE) + netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, GFAR_DEV_WEIGHT); + else + for (i = 0; i < priv->num_grps; i++) + netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, + GFAR_DEV_WEIGHT); if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | @@ -1240,15 +1245,13 @@ register_fail: static int gfar_remove(struct platform_device *ofdev) { - struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); + struct gfar_private *priv = platform_get_drvdata(ofdev); if (priv->phy_node) 
of_node_put(priv->phy_node); if (priv->tbi_node) of_node_put(priv->tbi_node); - dev_set_drvdata(&ofdev->dev, NULL); - unregister_netdev(priv->ndev); unmap_group_regs(priv); free_gfar_dev(priv); @@ -2825,6 +2828,48 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) return howmany; } +static int gfar_poll_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; + struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0]; + int work_done = 0; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(®s->ievent, IEVENT_RTX_MASK); + + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) + gfar_clean_tx_ring(tx_queue); + + work_done = gfar_clean_rx_ring(rx_queue, budget); + + if (work_done < budget) { + napi_complete(napi); + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); + + gfar_write(®s->imask, IMASK_DEFAULT); + + /* If we are coalescing interrupts, update the timer + * Otherwise, clear it + */ + gfar_write(®s->txic, 0); + if (likely(tx_queue->txcoalescing)) + gfar_write(®s->txic, tx_queue->txic); + + gfar_write(®s->rxic, 0); + if (unlikely(rx_queue->rxcoalescing)) + gfar_write(®s->rxic, rx_queue->rxic); + } + + return work_done; +} + static int gfar_poll(struct napi_struct *napi, int budget) { struct gfar_priv_grp *gfargrp = diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 083ea2b4d20a..098f133908ae 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -519,7 +519,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) } gfar_phc_index = ptp_clock_index(etsects->clock); - dev_set_drvdata(&dev->dev, etsects); + platform_set_drvdata(dev, etsects); return 0; @@ -537,7 +537,7 @@ no_memory: static int gianfar_ptp_remove(struct platform_device *dev) { - struct etsects *etsects = dev_get_drvdata(&dev->dev); + struct etsects *etsects = platform_get_drvdata(dev); gfar_write(&etsects->regs->tmr_temask, 0); gfar_write(&etsects->regs->tmr_ctrl, 0); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index e04c59818f60..3c43dac894ec 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3564,7 +3564,7 @@ static void ucc_geth_timeout(struct net_device *dev) static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); if (!netif_running(ndev)) @@ -3592,7 +3592,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) static int ucc_geth_resume(struct platform_device *ofdev) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); int err; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 418068b941b1..c1b6e7e31aac 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -227,7 +227,7 @@ static int xgmac_mdio_probe(struct 
platform_device *pdev) goto err_registration; } - dev_set_drvdata(&pdev->dev, bus); + platform_set_drvdata(pdev, bus); return 0; @@ -242,7 +242,7 @@ err_ioremap: static int xgmac_mdio_remove(struct platform_device *pdev) { - struct mii_bus *bus = dev_get_drvdata(&pdev->dev); + struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); iounmap(bus->priv); diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig index 6529d31595a7..563a1ac71dbc 100644 --- a/drivers/net/ethernet/ibm/Kconfig +++ b/drivers/net/ethernet/ibm/Kconfig @@ -5,8 +5,7 @@ config NET_VENDOR_IBM bool "IBM devices" default y - depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \ - (IBMEBUS && SPARSEMEM) + depends on PPC_PSERIES || PPC_DCR || (IBMEBUS && SPARSEMEM) ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index de2969cae262..35853b43d66e 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3287,7 +3287,7 @@ static int ehea_probe_adapter(struct platform_device *dev) adapter->pd = EHEA_PD_ID; - dev_set_drvdata(&dev->dev, adapter); + platform_set_drvdata(dev, adapter); /* initialize adapter and ports */ @@ -3358,7 +3358,7 @@ out: static int ehea_remove(struct platform_device *dev) { - struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev); + struct ehea_adapter *adapter = platform_get_drvdata(dev); int i; for (i = 0; i < EHEA_MAX_PORTS; i++) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 610ed223d1db..856ea66c9223 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -696,7 +696,7 @@ static int mal_probe(struct platform_device *ofdev) /* Advertise this instance to the rest of the world */ wmb(); - dev_set_drvdata(&ofdev->dev, mal); + platform_set_drvdata(ofdev, mal); mal_dbg_register(mal); @@ -722,7 +722,7 @@ static int mal_probe(struct platform_device *ofdev) static int mal_remove(struct platform_device *ofdev) { - struct mal_instance *mal = dev_get_drvdata(&ofdev->dev); + struct mal_instance *mal = platform_get_drvdata(ofdev); MAL_DBG(mal, "remove" NL); @@ -735,8 +735,6 @@ static int mal_remove(struct platform_device *ofdev) "mal%d: commac list is not empty on remove!\n", mal->index); - dev_set_drvdata(&ofdev->dev, NULL); - free_irq(mal->serr_irq, mal); free_irq(mal->txde_irq, mal); free_irq(mal->txeob_irq, mal); diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 39251765b55d..c47e23d6eeaa 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -95,7 +95,7 @@ static inline u32 rgmii_mode_mask(int mode, int input) int rgmii_attach(struct platform_device *ofdev, int input, int mode) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; RGMII_DBG(dev, "attach(%d)" NL, input); @@ -124,7 +124,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) void rgmii_set_speed(struct platform_device *ofdev, int input, int speed) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; u32 ssr; @@ -146,7 +146,7 @@ void rgmii_set_speed(struct 
platform_device *ofdev, int input, int speed) void rgmii_get_mdio(struct platform_device *ofdev, int input) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; u32 fer; @@ -167,7 +167,7 @@ void rgmii_get_mdio(struct platform_device *ofdev, int input) void rgmii_put_mdio(struct platform_device *ofdev, int input) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; u32 fer; @@ -188,7 +188,7 @@ void rgmii_put_mdio(struct platform_device *ofdev, int input) void rgmii_detach(struct platform_device *ofdev, int input) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p; BUG_ON(!dev || dev->users == 0); @@ -214,7 +214,7 @@ int rgmii_get_regs_len(struct platform_device *ofdev) void *rgmii_dump_regs(struct platform_device *ofdev, void *buf) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct emac_ethtool_regs_subhdr *hdr = buf; struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1); @@ -279,7 +279,7 @@ static int rgmii_probe(struct platform_device *ofdev) (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out"); wmb(); - dev_set_drvdata(&ofdev->dev, dev); + platform_set_drvdata(ofdev, dev); return 0; @@ -291,9 +291,7 @@ static int rgmii_probe(struct platform_device *ofdev) static int rgmii_remove(struct platform_device *ofdev) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); - - dev_set_drvdata(&ofdev->dev, NULL); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); WARN_ON(dev->users != 0); diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index 795f1393e2b6..c231a4a32c4d 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -25,7 +25,7 @@ int tah_attach(struct platform_device *ofdev, int channel) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); mutex_lock(&dev->lock); /* Reset has been done at probe() time... 
nothing else to do for now */ @@ -37,7 +37,7 @@ int tah_attach(struct platform_device *ofdev, int channel) void tah_detach(struct platform_device *ofdev, int channel) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); mutex_lock(&dev->lock); --dev->users; @@ -46,7 +46,7 @@ void tah_detach(struct platform_device *ofdev, int channel) void tah_reset(struct platform_device *ofdev) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); struct tah_regs __iomem *p = dev->base; int n; @@ -74,7 +74,7 @@ int tah_get_regs_len(struct platform_device *ofdev) void *tah_dump_regs(struct platform_device *ofdev, void *buf) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); struct emac_ethtool_regs_subhdr *hdr = buf; struct tah_regs *regs = (struct tah_regs *)(hdr + 1); @@ -118,7 +118,7 @@ static int tah_probe(struct platform_device *ofdev) goto err_free; } - dev_set_drvdata(&ofdev->dev, dev); + platform_set_drvdata(ofdev, dev); /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ tah_reset(ofdev); @@ -137,9 +137,7 @@ static int tah_probe(struct platform_device *ofdev) static int tah_remove(struct platform_device *ofdev) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); - - dev_set_drvdata(&ofdev->dev, NULL); + struct tah_instance *dev = platform_get_drvdata(ofdev); WARN_ON(dev->users != 0); diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index f91202f42125..4cdf286f7ee3 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -84,7 +84,7 @@ static inline u32 zmii_mode_mask(int mode, int input) int zmii_attach(struct platform_device *ofdev, int input, int *mode) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); struct zmii_regs __iomem *p = dev->base; ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); @@ -150,7 +150,7 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode) void zmii_get_mdio(struct platform_device *ofdev, int input) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); u32 fer; ZMII_DBG2(dev, "get_mdio(%d)" NL, input); @@ -163,7 +163,7 @@ void zmii_get_mdio(struct platform_device *ofdev, int input) void zmii_put_mdio(struct platform_device *ofdev, int input) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); ZMII_DBG2(dev, "put_mdio(%d)" NL, input); mutex_unlock(&dev->lock); @@ -172,7 +172,7 @@ void zmii_put_mdio(struct platform_device *ofdev, int input) void zmii_set_speed(struct platform_device *ofdev, int input, int speed) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); u32 ssr; mutex_lock(&dev->lock); @@ -193,7 +193,7 @@ void zmii_set_speed(struct platform_device *ofdev, int input, int speed) void zmii_detach(struct platform_device *ofdev, int input) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); BUG_ON(!dev || dev->users == 0); @@ -218,7 +218,7 @@ int zmii_get_regs_len(struct platform_device *ofdev) void *zmii_dump_regs(struct platform_device *ofdev, void *buf) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 
+ struct zmii_instance *dev = platform_get_drvdata(ofdev); struct emac_ethtool_regs_subhdr *hdr = buf; struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1); @@ -272,7 +272,7 @@ static int zmii_probe(struct platform_device *ofdev) printk(KERN_INFO "ZMII %s initialized\n", ofdev->dev.of_node->full_name); wmb(); - dev_set_drvdata(&ofdev->dev, dev); + platform_set_drvdata(ofdev, dev); return 0; @@ -284,9 +284,7 @@ static int zmii_probe(struct platform_device *ofdev) static int zmii_remove(struct platform_device *ofdev) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); - - dev_set_drvdata(&ofdev->dev, NULL); + struct zmii_instance *dev = platform_get_drvdata(ofdev); WARN_ON(dev->users != 0); diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig index 5119ef18953b..14a66e9d2e26 100644 --- a/drivers/net/ethernet/icplus/Kconfig +++ b/drivers/net/ethernet/icplus/Kconfig @@ -5,7 +5,6 @@ config IP1000 tristate "IP1000 Gigabit Ethernet support" depends on PCI - select NET_CORE select MII ---help--- This driver supports IP1000 gigabit Ethernet cards. diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index 068d78151658..1fde90b96685 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -2298,15 +2298,4 @@ static struct pci_driver ipg_pci_driver = { .remove = ipg_remove, }; -static int __init ipg_init_module(void) -{ - return pci_register_driver(&ipg_pci_driver); -} - -static void __exit ipg_exit_module(void) -{ - pci_unregister_driver(&ipg_pci_driver); -} - -module_init(ipg_init_module); -module_exit(ipg_exit_module); +module_pci_driver(ipg_pci_driver); diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 05f7264c51f7..f0e7ed20a750 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -20,7 +20,6 @@ if NET_VENDOR_INTEL config E100 tristate "Intel(R) PRO/100+ support" depends on PCI - select NET_CORE select MII ---help--- This driver supports Intel(R) PRO/100 family of adapters. 
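The ipg.c hunk just above is the stock module_pci_driver() conversion: the macro generates exactly the init/exit boilerplate being deleted. A minimal sketch of the equivalence, using a hypothetical example_driver (the name is illustrative, not from this diff):

	/* module_pci_driver(example_driver) expands, via module_driver(), to: */
	static int __init example_driver_init(void)
	{
		return pci_register_driver(&example_driver);
	}
	module_init(example_driver_init);

	static void __exit example_driver_exit(void)
	{
		pci_unregister_driver(&example_driver);
	}
	module_exit(example_driver_exit);

Same module load/unload semantics, less per-driver text, which is why the same conversion recurs across many drivers in this merge.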
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index d2bea3f07c73..5115ae76a5d1 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -3069,7 +3069,7 @@ static int e100_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, 0, 0); + pci_enable_wake(pdev, PCI_D0, 0); /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { @@ -3160,7 +3160,7 @@ static void e100_io_resume(struct pci_dev *pdev) struct nic *nic = netdev_priv(netdev); /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, 0, 0); + pci_enable_wake(pdev, PCI_D0, 0); netif_device_attach(netdev); if (netif_running(netdev)) { diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index b71c8502a2b3..895450e9bb3c 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -66,17 +66,17 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) s32 ret_val; if (hw->phy.media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; + phy->type = e1000_phy_none; return 0; } else { phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; } - phy->addr = 1; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; - phy->type = e1000_phy_gg82563; + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; /* This can only be done after all function pointers are setup. */ ret_val = e1000e_get_phy_id(hw); @@ -98,19 +98,19 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) u32 eecd = er32(EECD); u16 size; - nvm->opcode_bits = 8; - nvm->delay_usec = 1; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; switch (nvm->override) { case e1000_nvm_override_spi_large: - nvm->page_size = 32; + nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: - nvm->page_size = 8; + nvm->page_size = 8; nvm->address_bits = 8; break; default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
16 : 8; break; } @@ -128,7 +128,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; - nvm->word_size = 1 << size; + nvm->word_size = 1 << size; return 0; } @@ -859,7 +859,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); - reg &= ~(0xF << 27); /* 30:27 */ + reg &= ~(0xF << 27); /* 30:27 */ if (hw->phy.media_type != e1000_media_type_copper) reg &= ~(1 << 20); ew32(TARC(0), reg); diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 7380442a3829..4c303e2a7cb3 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -77,24 +77,24 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) return 0; } - phy->addr = 1; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_82571; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; switch (hw->mac.type) { case e1000_82571: case e1000_82572: - phy->type = e1000_phy_igp_2; + phy->type = e1000_phy_igp_2; break; case e1000_82573: - phy->type = e1000_phy_m88; + phy->type = e1000_phy_m88; break; case e1000_82574: case e1000_82583: - phy->type = e1000_phy_bm; + phy->type = e1000_phy_bm; phy->ops.acquire = e1000_get_hw_semaphore_82574; phy->ops.release = e1000_put_hw_semaphore_82574; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; @@ -193,7 +193,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; - nvm->word_size = 1 << size; + nvm->word_size = 1 << size; break; } @@ -339,7 +339,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - static int global_quad_port_a; /* global port a indication */ + static int global_quad_port_a; /* global port a indication */ struct pci_dev *pdev = adapter->pdev; int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; s32 rc; @@ -1003,8 +1003,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) default: break; } - if (ret_val) - e_dbg("Cannot acquire MDIO ownership\n"); ctrl = er32(CTRL); @@ -1015,7 +1013,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82574: case e1000_82583: - e1000_put_hw_semaphore_82574(hw); + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82574(hw); break; default: break; @@ -1178,7 +1178,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); - reg &= ~(0xF << 27); /* 30:27 */ + reg &= ~(0xF << 27); /* 30:27 */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: @@ -1390,7 +1390,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw) ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); if (ret_val) return false; - if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); if (ret_val) return false; diff --git 
a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 7c8ca658d553..59c22bf18701 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -244,7 +244,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) mac->autoneg = 1; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ + case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: goto err_inval; } @@ -416,7 +416,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data) static int e1000_get_regs_len(struct net_device __always_unused *netdev) { -#define E1000_REGS_LEN 32 /* overestimate */ +#define E1000_REGS_LEN 32 /* overestimate */ return E1000_REGS_LEN * sizeof(u32); } @@ -433,22 +433,22 @@ static void e1000_get_regs(struct net_device *netdev, regs->version = (1 << 24) | (adapter->pdev->revision << 16) | adapter->pdev->device; - regs_buff[0] = er32(CTRL); - regs_buff[1] = er32(STATUS); + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); - regs_buff[2] = er32(RCTL); - regs_buff[3] = er32(RDLEN(0)); - regs_buff[4] = er32(RDH(0)); - regs_buff[5] = er32(RDT(0)); - regs_buff[6] = er32(RDTR); + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN(0)); + regs_buff[4] = er32(RDH(0)); + regs_buff[5] = er32(RDT(0)); + regs_buff[6] = er32(RDTR); - regs_buff[7] = er32(TCTL); - regs_buff[8] = er32(TDLEN(0)); - regs_buff[9] = er32(TDH(0)); + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN(0)); + regs_buff[9] = er32(TDH(0)); regs_buff[10] = er32(TDT(0)); regs_buff[11] = er32(TIDV); - regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ /* ethtool doesn't use anything past this point, so all this * code is likely legacy junk for apps that may or may not exist @@ -1379,7 +1379,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) if (hw->phy.media_type == e1000_media_type_copper && hw->phy.type == e1000_phy_m88) { - ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ } else { /* Set the ILOS bit on the fiber Nic if half duplex link is * detected. 
@@ -1613,7 +1613,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ew32(TDT(0), k); e1e_flush(); msleep(200); - time = jiffies; /* set the start time for the receive */ + time = jiffies; /* set the start time for the receive */ good_cnt = 0; /* receive the sent packets */ do { @@ -1636,11 +1636,11 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) */ } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); if (good_cnt != 64) { - ret_val = 13; /* ret_val is the same as mis-compare */ + ret_val = 13; /* ret_val is the same as mis-compare */ break; } if (jiffies >= (time + 20)) { - ret_val = 14; /* error code for time out error */ + ret_val = 14; /* error code for time out error */ break; } } diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 84850f7a23e4..a6f903a9b773 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -402,13 +402,13 @@ struct e1000_phy_stats { struct e1000_host_mng_dhcp_cookie { u32 signature; - u8 status; - u8 reserved0; + u8 status; + u8 reserved0; u16 vlan_id; u32 reserved1; u16 reserved2; - u8 reserved3; - u8 checksum; + u8 reserved3; + u8 checksum; }; /* Host Interface "Rev 1" */ @@ -427,8 +427,8 @@ struct e1000_host_command_info { /* Host Interface "Rev 2" */ struct e1000_host_mng_command_header { - u8 command_id; - u8 checksum; + u8 command_id; + u8 checksum; u16 reserved1; u16 reserved2; u16 command_length; @@ -549,7 +549,7 @@ struct e1000_mac_info { u32 mta_shadow[MAX_MTA_REG]; u16 rar_entry_count; - u8 forced_speed_duplex; + u8 forced_speed_duplex; bool adaptive_ifs; bool has_fwsm; @@ -577,7 +577,7 @@ struct e1000_phy_info { u32 addr; u32 id; - u32 reset_delay_us; /* in usec */ + u32 reset_delay_us; /* in usec */ u32 revision; enum e1000_media_type media_type; @@ -636,11 +636,11 @@ struct e1000_dev_spec_82571 { }; struct e1000_dev_spec_80003es2lan { - bool mdic_wa_enable; + bool mdic_wa_enable; }; struct e1000_shadow_ram { - u16 value; + u16 value; bool modified; }; @@ -660,17 +660,17 @@ struct e1000_hw { void __iomem *hw_addr; void __iomem *flash_address; - struct e1000_mac_info mac; - struct e1000_fc_info fc; - struct e1000_phy_info phy; - struct e1000_nvm_info nvm; - struct e1000_bus_info bus; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; struct e1000_host_mng_dhcp_cookie mng_cookie; union { - struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_82571 e82571; struct e1000_dev_spec_80003es2lan e80003es2lan; - struct e1000_dev_spec_ich8lan ich8lan; + struct e1000_dev_spec_ich8lan ich8lan; } dev_spec; }; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index ad9d8f2dd868..9dde390f7e71 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -101,12 +101,12 @@ union ich8_hws_flash_regacc { /* ICH Flash Protected Region */ union ich8_flash_protected_range { struct ich8_pr { - u32 base:13; /* 0:12 Protected Range Base */ - u32 reserved1:2; /* 13:14 Reserved */ - u32 rpe:1; /* 15 Read Protection Enable */ - u32 limit:13; /* 16:28 Protected Range Limit */ - u32 reserved2:2; /* 29:30 Reserved */ - u32 wpe:1; /* 31 Write Protection Enable */ + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; /* 16:28 Protected Range Limit */ + 
u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ } range; u32 regval; }; @@ -362,21 +362,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) struct e1000_phy_info *phy = &hw->phy; s32 ret_val; - phy->addr = 1; - phy->reset_delay_us = 100; - - phy->ops.set_page = e1000_set_page_igp; - phy->ops.read_reg = e1000_read_phy_reg_hv; - phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; - phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; - phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; - phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; - phy->ops.write_reg = e1000_write_phy_reg_hv; - phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; - phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->id = e1000_phy_unknown; @@ -445,11 +445,11 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) s32 ret_val; u16 i = 0; - phy->addr = 1; - phy->reset_delay_us = 100; + phy->addr = 1; + phy->reset_delay_us = 100; - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; /* We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again @@ -457,7 +457,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) ret_val = e1000e_determine_phy_address(hw); if (ret_val) { phy->ops.write_reg = e1000e_write_phy_reg_bm; - phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; ret_val = e1000e_determine_phy_address(hw); if (ret_val) { e_dbg("Cannot determine PHY addr. 
Erroring out\n"); @@ -560,7 +560,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) /* Clear shadow ram */ for (i = 0; i < nvm->word_size; i++) { dev_spec->shadow_ram[i].modified = false; - dev_spec->shadow_ram[i].value = 0xFFFF; + dev_spec->shadow_ram[i].value = 0xFFFF; } return 0; @@ -1012,7 +1012,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) hw->dev_spec.ich8lan.eee_lp_ability = 0; if (!link) - return 0; /* No link detected */ + return 0; /* No link detected */ mac->get_link_status = false; @@ -2816,7 +2816,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val = -E1000_ERR_NVM; u8 count = 0; - if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + @@ -2939,7 +2939,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) * write to bank 0 etc. We also need to erase the segment that * is going to be written */ - ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { e_dbg("Could not detect valid bank, assuming bank 0\n"); bank = 0; @@ -4073,7 +4073,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) { u32 reg; u16 data; - u8 retry = 0; + u8 retry = 0; if (hw->phy.type != e1000_phy_igp_3) return; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a27e3bcc3249..77f81cbb601a 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1196,7 +1196,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ + rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1385,7 +1385,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, l1); goto copydone; - } /* if */ + } /* if */ } for (j = 0; j < PS_PAGE_BUFFERS; j++) { @@ -1800,7 +1800,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) u32 rctl, icr = er32(ICR); if (!icr || test_bit(__E1000_DOWN, &adapter->state)) - return IRQ_NONE; /* Not our interrupt */ + return IRQ_NONE; /* Not our interrupt */ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt @@ -2487,7 +2487,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) else if ((packets < 5) && (bytes > 512)) retval = low_latency; break; - case low_latency: /* 50 usec aka 20000 ints/s */ + case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ if (bytes / packets > 8000) @@ -2502,7 +2502,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) retval = lowest_latency; } break; - case bulk_latency: /* 250 usec aka 4000 ints/s */ + case bulk_latency: /* 250 usec aka 4000 ints/s */ if (bytes > 25000) { if (packets > 35) retval = low_latency; @@ -2554,7 +2554,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter) new_itr = 70000; break; case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ + new_itr = 20000; /* aka hwitr = ~200 */ 
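
The two netdev.c hunks just above touch e1000e's adaptive interrupt moderation: e1000_update_itr() classifies the last interval's traffic into latency buckets from packet and byte counts, and e1000_set_itr() maps each bucket to an interrupt rate. A compressed userspace sketch of that feedback loop; the bucket rates come from the hunk itself, but the classification thresholds here are illustrative, not the driver's exact values.

#include <stdio.h>

enum itr_bucket { lowest_latency, low_latency, bulk_latency };

/* Pick the next bucket from the last interval's traffic profile. */
static enum itr_bucket classify(enum itr_bucket cur, int packets, int bytes)
{
    int avg = packets ? bytes / packets : 0;

    if (!packets)
        return cur;                    /* idle interval: keep current rate */
    switch (cur) {
    case lowest_latency:
        return avg > 8000 ? bulk_latency : lowest_latency;
    case low_latency:
        if (avg > 8000)
            return bulk_latency;       /* large frames: favor throughput */
        if (bytes < 512)
            return lowest_latency;     /* sparse small packets: favor latency */
        return low_latency;
    case bulk_latency:
        return bytes < 6000 ? low_latency : bulk_latency;
    }
    return cur;
}

/* Interrupts per second per bucket, as assigned in e1000_set_itr(). */
static const int itr_rate[] = { 70000, 20000, 4000 };

int main(void)
{
    enum itr_bucket b = low_latency;

    b = classify(b, 4, 200);           /* a few tiny packets arrived */
    printf("next interrupt rate: %d ints/s\n", itr_rate[b]);
    return 0;
}
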
break; case bulk_latency: new_itr = 4000; @@ -2673,7 +2673,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) } static int e1000_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2699,7 +2699,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, } static int e1000_vlan_rx_kill_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -3104,13 +3104,13 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ - rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ - E1000_RCTL_BAM | /* RX All Bcast Pkts */ - E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ - rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ - E1000_RCTL_DPF | /* Allow filtered pause */ - E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs. */ @@ -3799,7 +3799,7 @@ void e1000e_reset(struct e1000_adapter *adapter) hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - adapter->max_frame_size)); - fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; break; case e1000_pchlan: @@ -3808,10 +3808,10 @@ void e1000e_reset(struct e1000_adapter *adapter) */ if (adapter->netdev->mtu > ETH_DATA_LEN) { fc->high_water = 0x3500; - fc->low_water = 0x1500; + fc->low_water = 0x1500; } else { fc->high_water = 0x5000; - fc->low_water = 0x3000; + fc->low_water = 0x3000; } fc->refresh_time = 0x1000; break; @@ -4581,7 +4581,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.crcerrs += er32(CRCERRS); adapter->stats.gprc += er32(GPRC); adapter->stats.gorc += er32(GORCL); - er32(GORCH); /* Clear gorc */ + er32(GORCH); /* Clear gorc */ adapter->stats.bprc += er32(BPRC); adapter->stats.mprc += er32(MPRC); adapter->stats.roc += er32(ROC); @@ -4614,7 +4614,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.xofftxc += er32(XOFFTXC); adapter->stats.gptc += er32(GPTC); adapter->stats.gotc += er32(GOTCL); - er32(GOTCH); /* Clear gotc */ + er32(GOTCH); /* Clear gotc */ adapter->stats.rnbc += er32(RNBC); adapter->stats.ruc += er32(RUC); @@ -5106,13 +5106,13 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) context_desc = E1000_CONTEXT_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; - context_desc->lower_setup.ip_fields.ipcss = ipcss; - context_desc->lower_setup.ip_fields.ipcso = ipcso; - context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); context_desc->upper_setup.tcp_fields.tucss = tucss; context_desc->upper_setup.tcp_fields.tucso = tucso; context_desc->upper_setup.tcp_fields.tucse = 0; - 
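
For orientation, the ipcss/ipcso/ipcse and tucss/tucso fields being realigned in this e1000_tso() hunk are byte offsets that tell the MAC where the IP and TCP checksums live in the outgoing frame. A sketch of how they fall out for a plain untagged IPv4/TCP packet; the header sizes are the standard ones, not values read from the driver.

#include <stdio.h>

#define ETH_HLEN     14   /* Ethernet header */
#define IP_HLEN      20   /* IPv4 header, no options */
#define IP_CSUM_OFF  10   /* checksum field offset inside the IP header */
#define TCP_CSUM_OFF 16   /* checksum field offset inside the TCP header */

int main(void)
{
    unsigned ipcss = ETH_HLEN;                 /* IP checksum start */
    unsigned ipcso = ETH_HLEN + IP_CSUM_OFF;   /* where to write the IP csum */
    unsigned ipcse = ETH_HLEN + IP_HLEN - 1;   /* IP checksum end, inclusive */
    unsigned tucss = ETH_HLEN + IP_HLEN;       /* TCP checksum start */
    unsigned tucso = tucss + TCP_CSUM_OFF;     /* where to write the TCP csum */

    printf("ipcss=%u ipcso=%u ipcse=%u tucss=%u tucso=%u\n",
           ipcss, ipcso, ipcse, tucss, tucso);
    return 0;
}
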
context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); @@ -5363,7 +5363,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) { - struct e1000_hw *hw = &adapter->hw; + struct e1000_hw *hw = &adapter->hw; u16 length, offset; if (vlan_tx_tag_present(skb) && @@ -6259,7 +6259,7 @@ static void e1000_netpoll(struct net_device *netdev) e1000_intr_msi(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); break; - default: /* E1000E_INT_MODE_LEGACY */ + default: /* E1000E_INT_MODE_LEGACY */ disable_irq(adapter->pdev->irq); e1000_intr(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); @@ -6589,9 +6589,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; /* construct the net_device struct */ - netdev->netdev_ops = &e1000e_netdev_ops; + netdev->netdev_ops = &e1000e_netdev_ops; e1000e_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; + netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); @@ -7034,7 +7034,6 @@ static void __exit e1000_exit_module(void) } module_exit(e1000_exit_module); - MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 44ddc0a0ee0e..d70a03906ac0 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -117,7 +117,6 @@ static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) u16 data; eecd = er32(EECD); - eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); data = 0; diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 59c76a6815a0..da2be59505c0 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1583,13 +1583,13 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) case e1000_phy_gg82563: case e1000_phy_bm: case e1000_phy_82578: - offset = M88E1000_PHY_SPEC_STATUS; - mask = M88E1000_PSSR_DOWNSHIFT; + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; break; case e1000_phy_igp_2: case e1000_phy_igp_3: - offset = IGP01E1000_PHY_LINK_HEALTH; - mask = IGP01E1000_PLHR_SS_DOWNGRADE; + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; break; default: /* speed downshift not supported */ @@ -1653,14 +1653,14 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - offset = IGP01E1000_PHY_PCS_INIT_REG; - mask = IGP01E1000_PHY_POLARITY_MASK; + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; } else { /* This really only applies to 10Mbps since * there is no polarity for 100Mbps (always 0). 
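
The downshift and polarity hunks in phy.c each select a (register, mask) pair keyed on the PHY family before doing a single e1e_rphy() read. The same shape can also be written table-driven rather than as a switch; a userspace sketch of that alternative, where the enum, the register numbers, and fake_rphy() are illustrative, not the driver's definitions.

#include <stdio.h>

enum phy_family { phy_m88, phy_igp, phy_families };

struct reg_mask {
    unsigned reg;
    unsigned short mask;
};

/* One (status register, downshift bit) pair per PHY family. */
static const struct reg_mask downshift[phy_families] = {
    [phy_m88] = { 0x11, 0x0020 },
    [phy_igp] = { 0x13, 0x8000 },
};

/* Returns 1 if the PHY reports a speed downshift, 0 otherwise. */
static int check_downshift(enum phy_family f,
                           unsigned short (*rphy)(unsigned reg))
{
    return !!(rphy(downshift[f].reg) & downshift[f].mask);
}

/* Stand-in for a real MDIO read: pretend the M88 status bit is set. */
static unsigned short fake_rphy(unsigned reg)
{
    return reg == 0x11 ? 0x0020 : 0;
}

int main(void)
{
    printf("m88 downshifted: %d\n", check_downshift(phy_m88, fake_rphy));
    printf("igp downshifted: %d\n", check_downshift(phy_igp, fake_rphy));
    return 0;
}
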
*/ - offset = IGP01E1000_PHY_PORT_STATUS; - mask = IGP01E1000_PSSR_POLARITY_REVERSED; + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; } ret_val = e1e_rphy(hw, offset, &data); @@ -1900,7 +1900,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; + s32 ret_val; u16 phy_data; bool link; @@ -2253,7 +2253,7 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case M88E1011_I_PHY_ID: phy_type = e1000_phy_m88; break; - case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ phy_type = e1000_phy_igp_2; break; case GG82563_E_PHY_ID: @@ -2317,7 +2317,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw) /* If phy_type is valid, break - we found our * PHY address */ - if (phy_type != e1000_phy_unknown) + if (phy_type != e1000_phy_unknown) return 0; usleep_range(1000, 2000); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index ff6a17cb1362..f21a91a299a2 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -401,12 +401,82 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) return 0; } +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. + **/ +static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + static s32 igb_get_invariants_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; s32 ret_val; u32 ctrl_ext = 0; + u32 link_mode = 0; switch 
(hw->device_id) { case E1000_DEV_ID_82575EB_COPPER: @@ -470,16 +540,56 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ hw->phy.media_type = e1000_media_type_copper; dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; ctrl_ext = rd32(E1000_CTRL_EXT); - switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { - case E1000_CTRL_EXT_LINK_MODE_SGMII: - dev_spec->sgmii_active = true; - break; + + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* fall through for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + + break; default: break; } diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 31a0f82cc650..aa201abb8ad2 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -61,20 +61,22 @@ /* Clear Interrupt timers after IMS clear */ /* packet buffer parity error detection enabled */ /* descriptor FIFO parity error detection enable */ -#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ -#define E1000_I2CCMD_REG_ADDR_SHIFT 16 -#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 -#define E1000_I2CCMD_OPCODE_READ 0x08000000 -#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 -#define E1000_I2CCMD_READY 0x20000000 -#define E1000_I2CCMD_ERROR 0x80000000 -#define E1000_MAX_SGMII_PHY_REG_ADDR 255 -#define E1000_I2CCMD_PHY_TIMEOUT 200 -#define E1000_IVAR_VALID 0x80 -#define E1000_GPIE_NSICR 0x00000001 -#define E1000_GPIE_MSIX_MODE 0x00000010 -#define E1000_GPIE_EIAME 0x40000000 -#define E1000_GPIE_PBA 0x80000000 +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 /* Receive Descriptor bit 
definitions */ #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ @@ -270,8 +272,10 @@ #define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX /* LED Control */ -#define E1000_LEDCTL_LED0_MODE_SHIFT 0 -#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 #define E1000_LEDCTL_MODE_LED_ON 0xE #define E1000_LEDCTL_MODE_LED_OFF 0xF diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 488abb24a54f..94d7866b9c20 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -528,6 +528,8 @@ struct e1000_dev_spec_82575 { bool global_device_reset; bool eee_disable; bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; }; struct e1000_hw { diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index bfc08e05c907..5caa332e7556 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -82,11 +82,11 @@ enum E1000_INVM_STRUCTURE_TYPE { #define E1000_INVM_MAJOR_SHIFT 4 #define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ - (ID_LED_OFF1_OFF2 << 4) | \ - (ID_LED_DEF1_DEF2)) + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) #define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) + (ID_LED_OFF1_ON2)) /* NVM offset defaults for i211 device */ #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 2559d70a2321..bab556a47fcc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1332,7 +1332,13 @@ s32 igb_id_led_init(struct e1000_hw *hw) u16 data, i, temp; const u16 led_mask = 0x0F; - ret_val = igb_valid_led_default(hw, &data); + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + if (ret_val) goto out; @@ -1406,15 +1412,34 @@ s32 igb_blink_led(struct e1000_hw *hw) u32 ledctl_blink = 0; u32 i; - /* set the blink bit for each LED that's "on" (0x0E) - * in ledctl_mode2 - */ - ledctl_blink = hw->mac.ledctl_mode2; - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << - (i * 8)); + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. 
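
The blink rework in igb_blink_led() walks the four 8-bit LED fields of LEDCTL and sets the blink bit only where the LED is effectively lit, honoring the invert (IVRT) bit taken from the register's default value. A userspace sketch of that bit manipulation with the same field layout; the inputs fed to main() are made up for the demonstration.

#include <stdio.h>
#include <stdint.h>

#define LED_MODE_MASK 0x0FU
#define LED_IVRT      0x40U
#define LED_BLINK     0x80U
#define MODE_LED_ON   0xEU
#define MODE_LED_OFF  0xFU

/* Rebuild LEDCTL so every LED that is effectively "on" also blinks. */
static uint32_t build_blink(uint32_t mode2, uint32_t defaults)
{
    uint32_t blink = mode2;
    int i;

    for (i = 0; i < 32; i += 8) {
        uint32_t mode = (mode2 >> i) & LED_MODE_MASK;
        uint32_t inverted = (defaults >> i) & LED_IVRT;

        if ((!inverted && mode == MODE_LED_ON) ||
            (inverted && mode == MODE_LED_OFF)) {
            blink &= ~(LED_MODE_MASK << i);
            blink |= (LED_BLINK | MODE_LED_ON) << i;
        }
    }
    return blink;
}

int main(void)
{
    /* LED0: "on", not inverted. LED1: "off" but inverted, so also lit. */
    uint32_t mode2 = (MODE_LED_OFF << 8) | MODE_LED_ON;
    uint32_t defaults = LED_IVRT << 8;

    printf("ledctl_blink = 0x%08x\n", build_blink(mode2, defaults)); /* 0x00008e8e */
    return 0;
}
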
+ */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } wr32(E1000_LEDCTL, ledctl_blink); diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 9979ebcf2a0c..60461946f98c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -341,6 +341,130 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) } /** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. 
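
Both SFP byte accessors, igb_read_sfp_data_byte() and e1000_write_sfp_data_byte(), follow the same MMIO recipe: write a command word to I2CCMD, then poll the READY bit a bounded number of times and check ERROR. A self-contained sketch of that poll loop against a simulated register; sim_read() stands in for rd32() and is wired to complete on the third poll.

#include <stdio.h>
#include <stdint.h>

#define CMD_READY  0x20000000U   /* mirrors E1000_I2CCMD_READY */
#define CMD_ERROR  0x80000000U   /* mirrors E1000_I2CCMD_ERROR */
#define POLL_LIMIT 200           /* mirrors E1000_I2CCMD_PHY_TIMEOUT */

static uint32_t sim_reg;
static int sim_polls;

/* Stand-in for rd32(E1000_I2CCMD); "hardware" completes on the third poll. */
static uint32_t sim_read(void)
{
    if (++sim_polls >= 3)
        sim_reg |= CMD_READY;
    return sim_reg;
}

/* Returns 0 on success, -1 on timeout, -2 if the device flagged an error. */
static int poll_ready(uint32_t *out)
{
    uint32_t v = 0;
    int i;

    for (i = 0; i < POLL_LIMIT; i++) {
        v = sim_read();          /* the driver udelay(50)s between polls */
        if (v & CMD_READY)
            break;
    }
    if (!(v & CMD_READY))
        return -1;
    if (v & CMD_ERROR)
        return -2;
    *out = v;
    return 0;
}

int main(void)
{
    uint32_t v;

    printf("poll_ready -> %d after %d polls\n", poll_ready(&v), sim_polls);
    return 0;
}
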
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + wr32(E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + wr32(E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return 0; +} + +/** * igb_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 784fd1c40989..6a0873f2095a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -69,6 +69,8 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); s32 igb_copper_link_setup_82580(struct e1000_hw *hw); s32 igb_get_phy_info_82580(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); @@ -157,4 +159,22 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define GS40G_CS_POWER_DOWN 0x0002 #define GS40G_LINE_LB 0x4000 +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + #endif diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9d6c075e232d..15ea8dc9dad3 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -322,11 +322,6 @@ static inline int igb_desc_unused(struct igb_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } -struct igb_i2c_client_list { - struct i2c_client *client; - struct igb_i2c_client_list *next; -}; - #ifdef CONFIG_IGB_HWMON #define IGB_HWMON_TYPE_LOC 0 @@ -514,13 +509,18 @@ extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, struct sk_buff *skb); -static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, +static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 
!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) - igb_ptp_rx_rgtstamp(q_vector, skb); + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + rx_ring->last_rx_timestamp = jiffies; } extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7876240fa74e..85fe7b52f435 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -142,6 +142,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; u32 status; if (hw->phy.media_type == e1000_media_type_copper) { @@ -162,49 +164,26 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->advertising |= hw->phy.autoneg_advertised; } - if (hw->mac.autoneg != 1) - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - - if (hw->fc.requested_mode == e1000_fc_full) - ecmd->advertising |= ADVERTISED_Pause; - else if (hw->fc.requested_mode == e1000_fc_rx_pause) - ecmd->advertising |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - else if (hw->fc.requested_mode == e1000_fc_tx_pause) - ecmd->advertising |= ADVERTISED_Asym_Pause; - else - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - ecmd->port = PORT_TP; ecmd->phy_address = hw->phy.addr; ecmd->transceiver = XCVR_INTERNAL; } else { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full | - SUPPORTED_FIBRE | + ecmd->supported = (SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause); - if (hw->mac.type == e1000_i354) - ecmd->supported |= SUPPORTED_2500baseX_Full; - ecmd->advertising = ADVERTISED_FIBRE; - - switch (adapter->link_speed) { - case SPEED_2500: - ecmd->advertising = ADVERTISED_2500baseX_Full; - break; - case SPEED_1000: - ecmd->advertising = ADVERTISED_1000baseT_Full; - break; - case SPEED_100: - ecmd->advertising = ADVERTISED_100baseT_Full; - break; - default: - break; + if (hw->mac.type == e1000_i354) { + ecmd->supported |= SUPPORTED_2500baseX_Full; + ecmd->advertising |= ADVERTISED_2500baseX_Full; + } + if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) { + ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + if (eth_flags->e100_base_fx) { + ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_100baseT_Full; } - if (hw->mac.autoneg == 1) ecmd->advertising |= ADVERTISED_Autoneg; @@ -212,6 +191,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->transceiver = XCVR_EXTERNAL; } + if (hw->mac.autoneg != 1) + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + if (hw->fc.requested_mode == e1000_fc_full) + ecmd->advertising |= ADVERTISED_Pause; + else if (hw->fc.requested_mode == e1000_fc_rx_pause) + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else if (hw->fc.requested_mode == e1000_fc_tx_pause) + ecmd->advertising |= ADVERTISED_Asym_Pause; + else + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) { @@ -392,6 +386,10 @@ static 
int igb_set_pauseparam(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; int retval = 0; + /* 100basefx does not support setting link flow control */ + if (hw->dev_spec._82575.eth_flags.e100_base_fx) + return -EINVAL; + adapter->fc_autoneg = pause->autoneg; while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) @@ -813,10 +811,8 @@ static int igb_set_eeprom(struct net_device *netdev, ret_val = hw->nvm.ops.write(hw, first_word, last_word - first_word + 1, eeprom_buff); - /* Update the checksum over the first part of the EEPROM if needed - * and flush shadow RAM for 82573 controllers - */ - if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) hw->nvm.ops.update(hw); igb_set_fw_version(adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 64cbe0dfe043..6a0c1b66ce54 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1667,10 +1667,13 @@ void igb_down(struct igb_adapter *adapter) wrfl(); msleep(10); - for (i = 0; i < adapter->num_q_vectors; i++) + igb_irq_disable(adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) { + napi_synchronize(&(adapter->q_vector[i]->napi)); napi_disable(&(adapter->q_vector[i]->napi)); + } - igb_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); @@ -6622,7 +6625,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, igb_rx_checksum(rx_ring, rx_desc, skb); - igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); + igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ca932387a80f..7be725cdfea8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -52,6 +52,11 @@ #include <linux/dca.h> #endif +#include <net/busy_poll.h> + +#ifdef CONFIG_NET_LL_RX_POLL +#define LL_EXTENDED_STATS +#endif /* common prefix used by pr_<> macros */ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -182,6 +187,11 @@ struct ixgbe_rx_buffer { struct ixgbe_queue_stats { u64 packets; u64 bytes; +#ifdef LL_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* LL_EXTENDED_STATS */ }; struct ixgbe_tx_queue_stats { @@ -356,9 +366,133 @@ struct ixgbe_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int state; +#define IXGBE_QV_STATE_IDLE 0 +#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ +#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */ +#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) +#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */ +#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */ +#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) +#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) + spinlock_t lock; +#endif /* CONFIG_NET_LL_RX_POLL */ + /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; +#ifdef CONFIG_NET_LL_RX_POLL +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ + + spin_lock_init(&q_vector->lock); + q_vector->state = IXGBE_QV_STATE_IDLE; +} + +/* 
called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + int rc = true; + spin_lock(&q_vector->lock); + if (q_vector->state & IXGBE_QV_LOCKED) { + WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); + q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; + rc = false; +#ifdef LL_EXTENDED_STATS + q_vector->tx.ring->stats.yields++; +#endif + } else + /* we don't care if someone yielded */ + q_vector->state = IXGBE_QV_STATE_NAPI; + spin_unlock(&q_vector->lock); + return rc; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + int rc = false; + spin_lock(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL | + IXGBE_QV_STATE_NAPI_YIELD)); + + if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) + rc = true; + q_vector->state = IXGBE_QV_STATE_IDLE; + spin_unlock(&q_vector->lock); + return rc; +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if ((q_vector->state & IXGBE_QV_LOCKED)) { + q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; + rc = false; +#ifdef LL_EXTENDED_STATS + q_vector->rx.ring->stats.yields++; +#endif + } else + /* preserve yield marks */ + q_vector->state |= IXGBE_QV_STATE_POLL; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + int rc = false; + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI)); + + if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) + rc = true; + q_vector->state = IXGBE_QV_STATE_IDLE; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); + return q_vector->state & IXGBE_QV_USER_PEND; +} +#else /* CONFIG_NET_LL_RX_POLL */ +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ +} + +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + return true; +} + +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +{ + return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + #ifdef CONFIG_IXGBE_HWMON #define IXGBE_HWMON_TYPE_LOC 0 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 1f2c805684dd..e055e000131b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -380,3 +380,26 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, } return 0; } + +static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + for (i = 0; i < MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); + return; +} + +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case 
ixgbe_mac_X540: + ixgbe_dcb_read_rtrup2tc_82599(hw, map); + break; + default: + break; + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 1634de8b627f..fc0a2dd52499 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -159,6 +159,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + /* DCB definitions for credit calculation */ #define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ #define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index a4ef07631d1e..d71d9ce3e394 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -45,6 +45,7 @@ /* Receive UP2TC mapping */ #define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_MASK 7 /* Transmit UP2TC mapping */ #define IXGBE_RTTUP2TC_UP_SHIFT 3 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index f3d68f9696ba..edd89a1ef27f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -554,6 +554,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) adapter->ixgbe_ieee_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS; + /* if possible update UP2TC mappings from HW */ + ixgbe_dcb_read_rtrup2tc(&adapter->hw, + adapter->ixgbe_ieee_ets->prio_tc); } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d3754722adb4..24e2e7aafda2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1054,6 +1054,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif continue; } @@ -1063,6 +1069,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; @@ -1070,6 +1082,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif continue; } @@ -1079,6 +1097,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif } for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { @@ -1115,12 +1139,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; +#ifdef LL_EXTENDED_STATS + 
sprintf(p, "tx_q_%u_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_q_%u_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_q_%u_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* LL_EXTENDED_STATS */ } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; +#ifdef LL_EXTENDED_STATS + sprintf(p, "rx_q_%u_ll_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_q_%u_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_q_%u_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* LL_EXTENDED_STATS */ } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { sprintf(p, "tx_pb_%u_pxon", i); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ef5f7a678ce1..90b4e1089ecc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -811,6 +811,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); + napi_hash_add(&q_vector->napi); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -931,6 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) adapter->rx_ring[ring->queue_index] = NULL; adapter->q_vector[v_idx] = NULL; + napi_hash_del(&q_vector->napi); netif_napi_del(&q_vector->napi); /* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d30fbdd81fca..bad8f14b1941 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1504,7 +1504,9 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, { struct ixgbe_adapter *adapter = q_vector->adapter; - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + if (ixgbe_qv_ll_polling(q_vector)) + netif_receive_skb(skb); + else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) napi_gro_receive(&q_vector->napi, skb); else netif_rx(skb); @@ -1892,9 +1894,9 @@ dma_sync: * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the syste. 
* - * Returns true if all work is completed without reaching budget + * Returns amount of work completed **/ -static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, +static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, const int budget) { @@ -1976,6 +1978,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } #endif /* IXGBE_FCOE */ + skb_mark_napi_id(skb, &q_vector->napi); ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ @@ -1992,9 +1995,43 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (cleaned_count) ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - return (total_rx_packets < budget); + return total_rx_packets; } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +static int ixgbe_low_latency_recv(struct napi_struct *napi) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int found = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbe_for_each_ring(ring, q_vector->rx) { + found = ixgbe_clean_rx_irq(q_vector, ring, 4); +#ifdef LL_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ixgbe_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -2550,6 +2587,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_for_each_ring(ring, q_vector->tx) clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); + if (!ixgbe_qv_lock_napi(q_vector)) + return budget; + /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) @@ -2558,9 +2598,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget = budget; ixgbe_for_each_ring(ring, q_vector->rx) - clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, - per_ring_budget); + clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget) < per_ring_budget); + ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -3747,16 +3788,25 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + ixgbe_qv_init_lock(adapter->q_vector[q_idx]); napi_enable(&adapter->q_vector[q_idx]->napi); + } } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + local_bh_disable(); /* for ixgbe_qv_lock_napi() */ + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { napi_disable(&adapter->q_vector[q_idx]->napi); + while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + mdelay(1); + } + } + local_bh_enable(); } #ifdef CONFIG_IXGBE_DCB @@ -7177,6 +7227,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif +#ifdef CONFIG_NET_LL_RX_POLL + .ndo_busy_poll = ixgbe_low_latency_recv, +#endif #ifdef IXGBE_FCOE .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, 
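
Before moving to the next file: the ixgbe hunks above wire ixgbe_low_latency_recv() into ndo_busy_poll. The contract is small: the callback runs with BHs disabled, must try-lock the queue vector, clean a tiny budget, and report either a packet count or a busy/failed code. A sketch of that shape with the driver internals stubbed out; clean_rx(), the struct layouts, and the packet counts in main() are invented for the demo, while the LL_FLUSH_* return codes mirror the busy-poll header.

#include <stdio.h>

#define LL_FLUSH_FAILED (-1)   /* device is going down */
#define LL_FLUSH_BUSY   (-2)   /* someone else owns the queue */

struct ring { int pending; };
struct qv   { int locked; struct ring rx; };

static int qv_trylock_poll(struct qv *qv)
{
    if (qv->locked)
        return 0;
    qv->locked = 1;
    return 1;
}

static void qv_unlock_poll(struct qv *qv)
{
    qv->locked = 0;
}

/* Drain up to budget packets; returns how many were cleaned. */
static int clean_rx(struct ring *r, int budget)
{
    int n = r->pending < budget ? r->pending : budget;

    r->pending -= n;
    return n;
}

static int low_latency_recv(struct qv *qv, int device_down)
{
    int found;

    if (device_down)
        return LL_FLUSH_FAILED;
    if (!qv_trylock_poll(qv))
        return LL_FLUSH_BUSY;      /* NAPI owns the queue right now */
    found = clean_rx(&qv->rx, 4);  /* small budget keeps poll latency low */
    qv_unlock_poll(qv);
    return found;
}

int main(void)
{
    struct qv qv = { 0, { 7 } };

    printf("first poll:  %d packets\n", low_latency_recv(&qv, 0)); /* 4 */
    printf("second poll: %d packets\n", low_latency_recv(&qv, 0)); /* 3 */
    return 0;
}
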
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 070a6f1a0577..7fbe6abf6054 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3148,7 +3148,6 @@ jme_init_one(struct pci_dev *pdev, jme->mii_if.mdio_write = jme_mdio_write; jme_clear_pm(jme); - pci_set_power_state(jme->pdev, PCI_D0); device_set_wakeup_enable(&pdev->dev, true); jme_set_phyfifo_5level(jme); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 5409fe876a44..270e65f21102 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -483,7 +483,6 @@ static void korina_multicast_list(struct net_device *dev) unsigned long flags; struct netdev_hw_addr *ha; u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ - int i; /* Set promiscuous mode */ if (dev->flags & IFF_PROMISC) @@ -495,12 +494,9 @@ static void korina_multicast_list(struct net_device *dev) /* Build the hash table */ if (netdev_mc_count(dev) > 4) { - u16 hash_table[4]; + u16 hash_table[4] = { 0 }; u32 crc; - for (i = 0; i < 4; i++) - hash_table[i] = 0; - netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; @@ -1214,7 +1210,6 @@ static int korina_remove(struct platform_device *pdev) iounmap(lp->rx_dma_regs); iounmap(lp->tx_dma_regs); - platform_set_drvdata(pdev, NULL); unregister_netdev(bif->dev); free_netdev(bif->dev); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index d1cbfb12c1ca..c35db735958f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -60,6 +60,10 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> static char mv643xx_eth_driver_name[] = "mv643xx_eth"; static char mv643xx_eth_driver_version[] = "1.4"; @@ -115,6 +119,8 @@ static char mv643xx_eth_driver_version[] = "1.4"; #define LINK_UP 0x00000002 #define TXQ_COMMAND 0x0048 #define TXQ_FIX_PRIO_CONF 0x004c +#define PORT_SERIAL_CONTROL1 0x004c +#define CLK125_BYPASS_EN 0x00000010 #define TX_BW_RATE 0x0050 #define TX_BW_MTU 0x0058 #define TX_BW_BURST 0x005c @@ -615,7 +621,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget) rx_desc = rxq->rx_desc_area + rx; - size = skb->end - skb->data; + size = skb_end_pointer(skb) - skb->data; rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, size, DMA_FROM_DEVICE); @@ -2450,13 +2456,159 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp) } } +#if defined(CONFIG_OF) +static const struct of_device_id mv643xx_eth_shared_ids[] = { + { .compatible = "marvell,orion-eth", }, + { .compatible = "marvell,kirkwood-eth", }, + { } +}; +MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); +#endif + +#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60) +#define mv643xx_eth_property(_np, _name, _v) \ + do { \ + u32 tmp; \ + if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \ + _v = tmp; \ + } while (0) + +static struct platform_device *port_platdev[3]; + +static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, + struct device_node *pnp) +{ + struct platform_device *ppdev; + struct mv643xx_eth_platform_data ppd; + struct resource res; + const char *mac_addr; + int ret; + int dev_num = 0; + + memset(&ppd, 0, sizeof(ppd)); + ppd.shared = pdev; + + memset(&res, 0, sizeof(res)); + if (!of_irq_to_resource(pnp, 0, &res)) { + dev_err(&pdev->dev, "missing interrupt on %s\n", 
pnp->name); + return -EINVAL; + } + + if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { + dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name); + return -EINVAL; + } + + if (ppd.port_number >= 3) { + dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name); + return -EINVAL; + } + + while (dev_num < 3 && port_platdev[dev_num]) + dev_num++; + + if (dev_num == 3) { + dev_err(&pdev->dev, "too many ports registered\n"); + return -EINVAL; + } + + mac_addr = of_get_mac_address(pnp); + if (mac_addr) + memcpy(ppd.mac_addr, mac_addr, 6); + + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); + mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size); + mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size); + mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr); + mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size); + + ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0); + if (!ppd.phy_node) { + ppd.phy_addr = MV643XX_ETH_PHY_NONE; + of_property_read_u32(pnp, "speed", &ppd.speed); + of_property_read_u32(pnp, "duplex", &ppd.duplex); + } + + ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); + if (!ppdev) + return -ENOMEM; + ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto port_err; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto port_err; + + ret = platform_device_add(ppdev); + if (ret) + goto port_err; + + port_platdev[dev_num] = ppdev; + + return 0; + +port_err: + platform_device_put(ppdev); + return ret; +} + +static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + struct mv643xx_eth_shared_platform_data *pd; + struct device_node *pnp, *np = pdev->dev.of_node; + int ret; + + /* bail out if not registered from DT */ + if (!np) + return 0; + + pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + pdev->dev.platform_data = pd; + + mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); + + for_each_available_child_of_node(np, pnp) { + ret = mv643xx_eth_shared_of_add_port(pdev, pnp); + if (ret) + return ret; + } + return 0; +} + +static void mv643xx_eth_shared_of_remove(void) +{ + int n; + + for (n = 0; n < 3; n++) { + platform_device_del(port_platdev[n]); + port_platdev[n] = NULL; + } +} +#else +static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + return 0; +} + +static inline void mv643xx_eth_shared_of_remove(void) +{ +} +#endif + static int mv643xx_eth_shared_probe(struct platform_device *pdev) { static int mv643xx_eth_version_printed; - struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; + struct mv643xx_eth_shared_platform_data *pd; struct mv643xx_eth_shared_private *msp; const struct mbus_dram_target_info *dram; struct resource *res; + int ret; if (!mv643xx_eth_version_printed++) pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", @@ -2469,8 +2621,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); if (msp == NULL) return -ENOMEM; + platform_set_drvdata(pdev, msp); - msp->base = ioremap(res->start, resource_size(res)); + msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (msp->base == NULL) return -ENOMEM; @@ -2485,12 +2638,15 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) if (dram) mv643xx_eth_conf_mbus_windows(msp, 
dram); + ret = mv643xx_eth_shared_of_probe(pdev); + if (ret) + return ret; + pd = pdev->dev.platform_data; + msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? pd->tx_csum_limit : 9 * 1024; infer_hw_params(msp); - platform_set_drvdata(pdev, msp); - return 0; } @@ -2498,10 +2654,9 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); - iounmap(msp->base); + mv643xx_eth_shared_of_remove(); if (!IS_ERR(msp->clk)) clk_disable_unprepare(msp->clk); - return 0; } @@ -2511,6 +2666,7 @@ static struct platform_driver mv643xx_eth_shared_driver = { .driver = { .name = MV643XX_ETH_SHARED_NAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), }, }; @@ -2701,6 +2857,15 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mp->dev = dev; + /* Kirkwood resets some registers on gated clocks. Especially + * CLK125_BYPASS_EN must be cleared but is not available on + * all other SoCs/System Controllers using this driver. + */ + if (of_device_is_compatible(pdev->dev.of_node, + "marvell,kirkwood-eth-port")) + wrlp(mp, PORT_SERIAL_CONTROL1, + rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN); + /* * Start with a default rate, and if there is a clock, allow * it to override the default. @@ -2710,23 +2875,35 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (!IS_ERR(mp->clk)) { clk_prepare_enable(mp->clk); mp->t_clk = clk_get_rate(mp->clk); + } else if (!IS_ERR(mp->shared->clk)) { + mp->t_clk = clk_get_rate(mp->shared->clk); } set_params(mp, pd); netif_set_real_num_tx_queues(dev, mp->txq_count); netif_set_real_num_rx_queues(dev, mp->rxq_count); - if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { + err = 0; + if (pd->phy_node) { + mp->phy = of_phy_connect(mp->dev, pd->phy_node, + mv643xx_eth_adjust_link, 0, + PHY_INTERFACE_MODE_GMII); + if (!mp->phy) + err = -ENODEV; + } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); - if (IS_ERR(mp->phy)) { + if (IS_ERR(mp->phy)) err = PTR_ERR(mp->phy); - if (err == -ENODEV) - err = -EPROBE_DEFER; - goto out; - } - phy_init(mp, pd->speed, pd->duplex); + else + phy_init(mp, pd->speed, pd->duplex); } + if (err == -ENODEV) { + err = -EPROBE_DEFER; + goto out; + } + if (err) + goto out; SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); @@ -2805,7 +2982,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev) unregister_netdev(mp->dev); if (mp->phy != NULL) - phy_detach(mp->phy); + phy_disconnect(mp->phy); cancel_work_sync(&mp->tx_timeout_task); if (!IS_ERR(mp->clk)) @@ -2813,8 +2990,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev) free_netdev(mp->dev); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c96678555233..712779fb12b7 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2251,6 +2251,21 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) return 0; } +/* Get mac address */ +static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_h; + + mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); + mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; + addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = (mac_addr_l >> 8) & 0xFF; + addr[5] = mac_addr_l & 0xFF; +} + /* Handle setting 
mac address */ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) { @@ -2667,7 +2682,9 @@ static int mvneta_probe(struct platform_device *pdev) u32 phy_addr; struct mvneta_port *pp; struct net_device *dev; - const char *mac_addr; + const char *dt_mac_addr; + char hw_mac_addr[ETH_ALEN]; + const char *mac_from; int phy_mode; int err; @@ -2703,13 +2720,6 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_irq; } - mac_addr = of_get_mac_address(dn); - - if (!mac_addr || !is_valid_ether_addr(mac_addr)) - eth_hw_addr_random(dev); - else - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); - dev->tx_queue_len = MVNETA_MAX_TXD; dev->watchdog_timeo = 5 * HZ; dev->netdev_ops = &mvneta_netdev_ops; @@ -2740,6 +2750,21 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk); + dt_mac_addr = of_get_mac_address(dn); + if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { + mac_from = "device tree"; + memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); + } else { + mvneta_get_mac_addr(pp, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + mac_from = "hardware"; + memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); + } else { + mac_from = "random"; + eth_hw_addr_random(dev); + } + } + pp->tx_done_timer.data = (unsigned long)dev; pp->tx_ring_size = MVNETA_MAX_TXD; @@ -2772,7 +2797,8 @@ static int mvneta_probe(struct platform_device *pdev) goto err_deinit; } - netdev_info(dev, "mac: %pM\n", dev->dev_addr); + netdev_info(dev, "Using %s mac address %pM\n", mac_from, + dev->dev_addr); platform_set_drvdata(pdev, pp->dev); @@ -2804,8 +2830,6 @@ static int mvneta_remove(struct platform_device *pdev) irq_dispose_mapping(dev->irq); free_netdev(dev); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 1c8af8ba08d9..db481477bcc5 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -357,7 +357,7 @@ static void rxq_refill(struct net_device *dev) /* Get 'used' Rx descriptor */ used_rx_desc = pep->rx_used_desc_q; p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; - size = skb->end - skb->data; + size = skb_end_pointer(skb) - skb->data; p_used_rx_desc->buf_ptr = dma_map_single(NULL, skb->data, size, @@ -1602,7 +1602,6 @@ static int pxa168_eth_remove(struct platform_device *pdev) unregister_netdev(dev); cancel_work_sync(&pep->tx_timeout_task); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 171f4b3dda07..c896079728e1 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3706,7 +3706,7 @@ static const struct file_operations skge_debug_fops = { static int skge_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct skge_port *skge; struct dentry *d; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index d175bbd3ffd3..e09a8c6f8536 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4642,7 +4642,7 @@ static const struct file_operations sky2_debug_fops = { static int sky2_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct sky2_port *sky2 = 
netdev_priv(dev); if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index bcdbc14aeff0..8cf7563a8d92 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -19,5 +19,6 @@ config NET_VENDOR_MELLANOX if NET_VENDOR_MELLANOX source "drivers/net/ethernet/mellanox/mlx4/Kconfig" +source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig" endif # NET_VENDOR_MELLANOX diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile index 37afb9683372..38fe32ef5e5f 100644 --- a/drivers/net/ethernet/mellanox/Makefile +++ b/drivers/net/ethernet/mellanox/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_MLX4_CORE) += mlx4/ +obj-$(CONFIG_MLX5_CORE) += mlx5/core/ diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0e572a527154..299d0184f983 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -39,6 +39,7 @@ #include <linux/errno.h> #include <linux/mlx4/cmd.h> +#include <linux/mlx4/device.h> #include <linux/semaphore.h> #include <rdma/ib_smi.h> @@ -111,6 +112,14 @@ enum { GO_BIT_TIMEOUT_MSECS = 10000 }; +enum mlx4_vlan_transition { + MLX4_VLAN_TRANSITION_VST_VST = 0, + MLX4_VLAN_TRANSITION_VST_VGT = 1, + MLX4_VLAN_TRANSITION_VGT_VST = 2, + MLX4_VLAN_TRANSITION_VGT_VGT = 3, +}; + + struct mlx4_cmd_context { struct completion done; int result; @@ -256,6 +265,8 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "communication channel command 0x%x timed out\n", + op); err = -EBUSY; goto out; } @@ -485,6 +496,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, } if (cmd_pending(dev)) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); err = -ETIMEDOUT; goto out; } @@ -548,6 +561,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); err = -EBUSY; goto out; } @@ -785,6 +800,15 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } +int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1219,6 +1243,15 @@ static struct mlx4_cmd_info cmd_info[] = { .wrapper = mlx4_GEN_QP_wrapper }, { + .opcode = MLX4_CMD_UPDATE_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = MLX4_CMD_UPDATE_QP_wrapper + }, + { .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, .has_outbox = false, @@ -1488,6 +1521,102 @@ out: return ret; } +static int calculate_transition(u16 oper_vlan, u16 admin_vlan) +{ + return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); +} + +int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, + int slave, int port) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_vport_state *vp_admin; + struct mlx4_vf_immed_vlan_work *work; + struct mlx4_dev *dev = &(priv->dev); + int err; + int 
admin_vlan_ix = NO_INDX; + enum mlx4_vlan_transition vlan_trans; + + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + if (vp_oper->state.default_vlan == vp_admin->default_vlan && + vp_oper->state.default_qos == vp_admin->default_qos && + vp_oper->state.link_state == vp_admin->link_state) + return 0; + + vlan_trans = calculate_transition(vp_oper->state.default_vlan, + vp_admin->default_vlan); + + if (!(priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && + vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) { + /* even if the UPDATE_QP command isn't supported, we still want + * to set this VF link according to the admin directive + */ + vp_oper->state.link_state = vp_admin->link_state; + return -1; + } + + mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", + slave, port); + mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, + vp_admin->default_qos, vp_admin->link_state); + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + if (vp_oper->state.default_vlan != vp_admin->default_vlan) { + err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, + &admin_vlan_ix); + if (err) { + kfree(work); + mlx4_warn((&priv->dev), + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; + mlx4_dbg((&(priv->dev)), + "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_admin->default_vlan), + admin_vlan_ix, slave, port); + } + + /* save original vlan ix and vlan id */ + work->orig_vlan_id = vp_oper->state.default_vlan; + work->orig_vlan_ix = vp_oper->vlan_idx; + + /* handle new qos */ + if (vp_oper->state.default_qos != vp_admin->default_qos) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS; + + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN) + vp_oper->vlan_idx = admin_vlan_ix; + + vp_oper->state.default_vlan = vp_admin->default_vlan; + vp_oper->state.default_qos = vp_admin->default_qos; + vp_oper->state.link_state = vp_admin->link_state; + + if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; + + /* iterate over QPs owned by this slave, using UPDATE_QP */ + work->port = port; + work->slave = slave; + work->qos = vp_oper->state.default_qos; + work->vlan_id = vp_oper->state.default_vlan; + work->vlan_ix = vp_oper->vlan_idx; + work->priv = priv; + INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler); + queue_work(priv->mfunc.master.comm_wq, &work->work); + + return 0; +} + + static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) { int port, err; @@ -2102,10 +2231,12 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) } EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); + int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_vport_state *s_info; + struct mlx4_vport_oper_state *vf_oper; + struct mlx4_vport_state *vf_admin; int slave; if ((!mlx4_is_master(dev)) || @@ -2119,12 +2250,19 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) if (slave < 0) return -EINVAL; - s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + if ((0 == vlan) && (0 == qos)) - s_info->default_vlan = MLX4_VGT; + vf_admin->default_vlan = MLX4_VGT; else - 
s_info->default_vlan = vlan; - s_info->default_qos = qos; + vf_admin->default_vlan = vlan; + vf_admin->default_qos = qos; + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_info(dev, + "updating vf %d port %d config will take effect on next VF restart\n", + vf, port); return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); @@ -2178,7 +2316,55 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in ivf->qos = s_info->default_qos; ivf->tx_rate = s_info->tx_rate; ivf->spoofchk = s_info->spoofchk; + ivf->linkstate = s_info->link_state; return 0; } EXPORT_SYMBOL_GPL(mlx4_get_vf_config); + +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + u8 link_stat_event; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + /* get current link state */ + if (!priv->sense.do_sense_port[port]) + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + else + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + case IFLA_VF_LINK_STATE_ENABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + break; + + case IFLA_VF_LINK_STATE_DISABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + default: + mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n", + link_state, slave, port); + return -EINVAL; + } + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->link_state = link_state; + + /* send event */ + mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event); + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_dbg(dev, + "updating vf %d port %d no link state HW enforcement\n", + vf, port); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 1e6c594d6d04..3e2d5047cdb3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -139,6 +139,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, if (!cq->is_tx) { netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); + napi_hash_add(&cq->napi); napi_enable(&cq->napi); } @@ -162,6 +163,8 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { if (!cq->is_tx) { napi_disable(&cq->napi); + napi_hash_del(&cq->napi); + synchronize_rcu(); netif_napi_del(&cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 0f91222ea3d7..9d4a1ea030d8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -207,9 +207,6 @@ static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); int i; - if (!priv->maxrate) - return -EINVAL; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) maxrate->tc_maxrate[i] = priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c9e6b62dd000..727874f575ce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -222,7 +222,12 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: return (priv->stats_bitmap ?
bit_count : NUM_ALL_STATS) + - (priv->tx_ring_num + priv->rx_ring_num) * 2; + (priv->tx_ring_num * 2) + +#ifdef CONFIG_NET_LL_RX_POLL + (priv->rx_ring_num * 5); +#else + (priv->rx_ring_num * 2); +#endif case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; @@ -271,6 +276,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, for (i = 0; i < priv->rx_ring_num; i++) { data[index++] = priv->rx_ring[i].packets; data[index++] = priv->rx_ring[i].bytes; +#ifdef CONFIG_NET_LL_RX_POLL + data[index++] = priv->rx_ring[i].yields; + data[index++] = priv->rx_ring[i].misses; + data[index++] = priv->rx_ring[i].cleaned; +#endif } spin_unlock_bh(&priv->stats_lock); @@ -334,6 +344,14 @@ static void mlx4_en_get_strings(struct net_device *dev, "rx%d_packets", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_bytes", i); +#ifdef CONFIG_NET_LL_RX_POLL + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_napi_yield", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_misses", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_cleaned", i); +#endif } break; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index a5c9df07a7d0..a071cda2dd04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -310,7 +310,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) err_mr: (void) mlx4_mr_free(dev, &mdev->mr); err_map: - if (!mdev->uar_map) + if (mdev->uar_map) iounmap(mdev->uar_map); err_uar: mlx4_uar_free(dev, &mdev->priv_uar); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 89c47ea84b50..5eac871399d8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -38,6 +38,7 @@ #include <linux/slab.h> #include <linux/hash.h> #include <net/ip.h> +#include <net/busy_poll.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/device.h> @@ -67,6 +68,34 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +static int mlx4_en_low_latency_recv(struct napi_struct *napi) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring]; + int done; + + if (!priv->port_up) + return LL_FLUSH_FAILED; + + if (!mlx4_en_cq_lock_poll(cq)) + return LL_FLUSH_BUSY; + + done = mlx4_en_process_rx_cq(dev, cq, 4); + if (likely(done)) + rx_ring->cleaned += done; + else + rx_ring->misses++; + + mlx4_en_cq_unlock_poll(cq); + + return done; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { @@ -376,7 +405,7 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, en_err(priv, "Failed configuring VLAN filter\n"); } if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) - en_err(priv, "failed adding vlan %d\n", vid); + en_dbg(HW, priv, "failed adding vlan %d\n", vid); mutex_unlock(&mdev->state_lock); return 0; @@ -399,7 +428,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) mlx4_unregister_vlan(mdev->dev, priv->port, idx); else - en_err(priv, "could not find vid %d in cache\n", vid); + en_dbg(HW, priv, "could not find vid %d in cache\n", vid); if (mdev->device_up && 
priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); @@ -1207,10 +1236,19 @@ static void mlx4_en_tx_timeout(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + int i; if (netif_msg_timer(priv)) en_warn(priv, "Tx timeout called on port:%d\n", priv->port); + for (i = 0; i < priv->tx_ring_num; i++) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) + continue; + en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", + i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn, + priv->tx_ring[i].cons, priv->tx_ring[i].prod); + } + priv->port_stats.tx_timeout++; en_dbg(DRV, priv, "Scheduling watchdog\n"); queue_work(mdev->workqueue, &priv->watchdog_task); @@ -1346,12 +1384,13 @@ static void mlx4_en_do_get_stats(struct work_struct *work) mutex_lock(&mdev->state_lock); if (mdev->device_up) { - err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); - if (err) - en_dbg(HW, priv, "Could not update stats\n"); + if (priv->port_up) { + err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); + if (err) + en_dbg(HW, priv, "Could not update stats\n"); - if (priv->port_up) mlx4_en_auto_moderation(priv); + } queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); } @@ -1445,6 +1484,8 @@ int mlx4_en_start_port(struct net_device *dev) for (i = 0; i < priv->rx_ring_num; i++) { cq = &priv->rx_cq[i]; + mlx4_en_cq_init_lock(cq); + err = mlx4_en_activate_cq(priv, cq, i); if (err) { en_err(priv, "Failed activating Rx CQ\n"); @@ -1603,6 +1644,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) return; } + /* close port*/ + mlx4_CLOSE_PORT(mdev->dev, priv->port); + /* Synchronize with tx routine */ netif_tx_lock_bh(dev); if (detach) @@ -1694,14 +1738,20 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { - mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); - while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state)) + struct mlx4_en_cq *cq = &priv->rx_cq[i]; + + local_bh_disable(); + while (!mlx4_en_cq_lock_napi(cq)) { + pr_info("CQ %d locked\n", i); + mdelay(1); + } + local_bh_enable(); + + while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) msleep(1); - mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); + mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); + mlx4_en_deactivate_cq(priv, cq); } - - /* close port*/ - mlx4_CLOSE_PORT(mdev->dev, priv->port); } static void mlx4_en_restart(struct work_struct *work) @@ -2061,6 +2111,13 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_ return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); } +static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); +} static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2083,6 +2140,9 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif +#ifdef CONFIG_NET_LL_RX_POLL + .ndo_busy_poll = mlx4_en_low_latency_recv, +#endif }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2101,6 +2161,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_set_vf_mac = mlx4_en_set_vf_mac, .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, .ndo_set_vf_spoofchk = 
mlx4_en_set_vf_spoofchk, + .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, .ndo_get_vf_config = mlx4_en_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, @@ -2271,6 +2332,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, mdev->pndev[port] = dev; netif_carrier_off(dev); + mlx4_en_set_default_moderation(priv); + err = register_netdev(dev); if (err) { en_err(priv, "Netdev registration failed for port %d\n", port); @@ -2302,7 +2365,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, en_err(priv, "Failed Initializing port\n"); goto out; } - mlx4_en_set_default_moderation(priv); queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 02aee1ebd203..dec455c8f627 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -31,6 +31,7 @@ * */ +#include <net/busy_poll.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> @@ -42,40 +43,64 @@ #include "mlx4_en.h" +static int mlx4_alloc_pages(struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *page_alloc, + const struct mlx4_en_frag_info *frag_info, + gfp_t _gfp) +{ + int order; + struct page *page; + dma_addr_t dma; + + for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { + gfp_t gfp = _gfp; + + if (order) + gfp |= __GFP_COMP | __GFP_NOWARN; + page = alloc_pages(gfp, order); + if (likely(page)) + break; + if (--order < 0 || + ((PAGE_SIZE << order) < frag_info->frag_size)) + return -ENOMEM; + } + dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(priv->ddev, dma)) { + put_page(page); + return -ENOMEM; + } + page_alloc->size = PAGE_SIZE << order; + page_alloc->page = page; + page_alloc->dma = dma; + page_alloc->offset = frag_info->frag_align; + /* Not doing get_page() for each frag is a big win + * on asymmetric workloads.
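+ * Instead, the page refcount is pre-charged once just below, with + * one reference for every frag_stride-sized slice of the allocation.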
+ */ + atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); + return 0; +} + static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_alloc *frags, - struct mlx4_en_rx_alloc *ring_alloc) + struct mlx4_en_rx_alloc *ring_alloc, + gfp_t gfp) { struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; - struct mlx4_en_frag_info *frag_info; + const struct mlx4_en_frag_info *frag_info; struct page *page; dma_addr_t dma; int i; for (i = 0; i < priv->num_frags; i++) { frag_info = &priv->frag_info[i]; - if (ring_alloc[i].offset == frag_info->last_offset) { - page = alloc_pages(GFP_ATOMIC | __GFP_COMP, - MLX4_EN_ALLOC_ORDER); - if (!page) - goto out; - dma = dma_map_page(priv->ddev, page, 0, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - if (dma_mapping_error(priv->ddev, dma)) { - put_page(page); - goto out; - } - page_alloc[i].page = page; - page_alloc[i].dma = dma; - page_alloc[i].offset = frag_info->frag_align; - } else { - page_alloc[i].page = ring_alloc[i].page; - get_page(ring_alloc[i].page); - page_alloc[i].dma = ring_alloc[i].dma; - page_alloc[i].offset = ring_alloc[i].offset + - frag_info->frag_stride; - } + page_alloc[i] = ring_alloc[i]; + page_alloc[i].offset += frag_info->frag_stride; + if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) + continue; + if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) + goto out; } for (i = 0; i < priv->num_frags; i++) { @@ -87,14 +112,16 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, return 0; - out: while (i--) { frag_info = &priv->frag_info[i]; - if (ring_alloc[i].offset == frag_info->last_offset) + if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - put_page(page_alloc[i].page); + page_alloc[i].size, PCI_DMA_FROMDEVICE); + page = page_alloc[i].page; + atomic_set(&page->_count, 1); + put_page(page); + } } return -ENOMEM; } @@ -103,12 +130,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc *frags, int i) { - struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - if (frags[i].offset == frag_info->last_offset) { - dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE, + if (frags[i].offset + frag_info->frag_stride > frags[i].size) + dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, PCI_DMA_FROMDEVICE); - } + if (frags[i].page) put_page(frags[i].page); } @@ -116,35 +143,28 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { - struct mlx4_en_rx_alloc *page_alloc; int i; + struct mlx4_en_rx_alloc *page_alloc; for (i = 0; i < priv->num_frags; i++) { - page_alloc = &ring->page_alloc[i]; - page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, - MLX4_EN_ALLOC_ORDER); - if (!page_alloc->page) - goto out; + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - if (dma_mapping_error(priv->ddev, page_alloc->dma)) { - put_page(page_alloc->page); - page_alloc->page = NULL; + if (mlx4_alloc_pages(priv, &ring->page_alloc[i], + frag_info, GFP_KERNEL)) goto out; - } - page_alloc->offset = priv->frag_info[i].frag_align; - en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", - i, page_alloc->page); } return 0; out: while (i--) { + 
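/* Unwind the allocators initialised so far: drop the DMA mapping + * and reset the pre-charged page refcount to one so that put_page() + * actually frees the page. + */ +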
struct page *page; + page_alloc = &ring->page_alloc[i]; dma_unmap_page(priv->ddev, page_alloc->dma, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - put_page(page_alloc->page); + page_alloc->size, PCI_DMA_FROMDEVICE); + page = page_alloc->page; + atomic_set(&page->_count, 1); + put_page(page); page_alloc->page = NULL; } return -ENOMEM; @@ -157,13 +177,18 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, int i; for (i = 0; i < priv->num_frags; i++) { + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + page_alloc = &ring->page_alloc[i]; en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", i, page_count(page_alloc->page)); dma_unmap_page(priv->ddev, page_alloc->dma, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - put_page(page_alloc->page); + page_alloc->size, PCI_DMA_FROMDEVICE); + while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { + put_page(page_alloc->page); + page_alloc->offset += frag_info->frag_stride; + } page_alloc->page = NULL; } } @@ -194,13 +219,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, } static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, int index) + struct mlx4_en_rx_ring *ring, int index, + gfp_t gfp) { struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info); - return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc); + return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); } static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) @@ -234,7 +260,8 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) ring = &priv->rx_ring[ring_ind]; if (mlx4_en_prepare_rx_desc(priv, ring, - ring->actual_size)) { + ring->actual_size, + GFP_KERNEL)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { en_err(priv, "Failed to allocate " "enough rx buffers\n"); @@ -449,11 +476,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, DMA_FROM_DEVICE); /* Save page reference in skb */ - get_page(frags[nr].page); __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); skb_frags_rx[nr].page_offset = frags[nr].offset; skb->truesize += frag_info->frag_stride; + frags[nr].page = NULL; } /* Adjust size of last fragment to match actual length */ if (nr > 0) @@ -546,7 +573,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, int index = ring->prod & ring->size_mask; while ((u32) (ring->prod - ring->cons) < ring->actual_size) { - if (mlx4_en_prepare_rx_desc(priv, ring, index)) + if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC)) break; ring->prod++; index = ring->prod & ring->size_mask; @@ -656,8 +683,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * - DIX Ethernet (type interpretation) * - TCP/IP (v4) * - without IP options - * - not an IP fragment */ - if (dev->features & NETIF_F_GRO) { + * - not an IP fragment + * - no LLS polling in progress + */ + if (!mlx4_en_cq_ll_polling(cq) && + (dev->features & NETIF_F_GRO)) { struct sk_buff *gro_skb = napi_get_frags(&cq->napi); if (!gro_skb) goto next; @@ -737,6 +767,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud timestamp); } + skb_mark_napi_id(skb, &cq->napi); + /* Push it up the stack */ netif_receive_skb(skb); @@ -781,8 +813,13 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) struct mlx4_en_priv *priv = 
netdev_priv(dev); int done; + if (!mlx4_en_cq_lock_napi(cq)) + return budget; + done = mlx4_en_process_rx_cq(dev, cq, budget); + mlx4_en_cq_unlock_napi(cq); + /* If we used up all the quota - we're probably not done yet... */ if (done == budget) INC_PERF_COUNTER(priv->pstats.napi_quota); @@ -794,21 +831,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) return done; } - -/* Calculate the last offset position that accommodates a full fragment - * (assuming fagment size = stride-align) */ -static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align) -{ - u16 res = MLX4_EN_ALLOC_SIZE % stride; - u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; - - en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " - "res:%d offset:%d\n", stride, align, res, offset); - return offset; -} - - -static int frag_sizes[] = { +static const int frag_sizes[] = { FRAG_SZ0, FRAG_SZ1, FRAG_SZ2, @@ -836,9 +859,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], SMP_CACHE_BYTES); } - priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset( - priv, priv->frag_info[i].frag_stride, - priv->frag_info[i].frag_align); buf_size += priv->frag_info[i].frag_size; i++; } @@ -850,13 +870,13 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " "num_frags:%d):\n", eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { - en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " - "stride:%d last_offset:%d\n", i, - priv->frag_info[i].frag_size, - priv->frag_info[i].frag_prefix_size, - priv->frag_info[i].frag_align, - priv->frag_info[i].frag_stride, - priv->frag_info[i].last_offset); + en_err(priv, + " frag:%d - size:%d prefix:%d align:%d stride:%d\n", + i, + priv->frag_info[i].frag_size, + priv->frag_info[i].frag_prefix_size, + priv->frag_info[i].frag_align, + priv->frag_info[i].frag_stride); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 4e6877a032a8..7c492382da09 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -544,7 +544,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) if (vlan_tx_tag_present(skb)) up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; - return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up; + return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 6000342f9725..7e042869ef0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -448,6 +448,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) int i; enum slave_port_gen_event gen_event; unsigned long flags; + struct mlx4_vport_state *s_info; while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { /* @@ -556,7 +557,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" " to slave: %d, port:%d\n", __func__, i, port); - mlx4_slave_event(dev, i, eqe); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + mlx4_slave_event(dev, i, eqe); } else { /* IB port */ set_and_calc_slave_port_state(dev, i, port, MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, @@ -580,7 
+583,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) for (i = 0; i < dev->num_slaves; i++) { if (i == mlx4_master_func_num(dev)) continue; - mlx4_slave_event(dev, i, eqe); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + mlx4_slave_event(dev, i, eqe); } else /* IB port */ /* port-up event will be sent to a slave when the diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2c97901c6a6d..8873d6802c80 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -133,7 +133,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [4] = "Automatic MAC reassignment support", [5] = "Time stamping support", [6] = "VST (control vlan insertion/stripping) support", - [7] = "FSM (MAC anti-spoofing) support" + [7] = "FSM (MAC anti-spoofing) support", + [8] = "Dynamic QP updates support" }; int i; @@ -659,6 +660,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + if (field32 & (1 << 16)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; if (field32 & (1 << 26)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; if (field32 & (1 << 20)) @@ -830,8 +833,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, u8 port_type; u16 short_field; int err; + int admin_link_state; #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 +#define MLX4_PORT_LINK_UP_MASK 0x80 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e @@ -861,6 +866,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, /* set port type to currently operating port type */ port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); + admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state; + if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state) + port_type |= MLX4_PORT_LINK_UP_MASK; + else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) + port_type &= ~MLX4_PORT_LINK_UP_MASK; + MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 264ddeb846a3..e85af922dcdc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -842,11 +842,11 @@ static ssize_t set_port_ib_mtu(struct device *dev, return -EINVAL; } - err = sscanf(buf, "%d", &mtu); - if (err > 0) + err = kstrtoint(buf, 0, &mtu); + if (!err) ibta_mtu = int_to_ibta_mtu(mtu); - if (err <= 0 || ibta_mtu < 0) { + if (err || ibta_mtu < 0) { mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); return -EINVAL; } @@ -2080,6 +2080,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) num_vfs, MLX4_MAX_NUM_VF); return -EINVAL; } + + if (num_vfs < 0) { + pr_err("num_vfs module parameter cannot be negative\n"); + return -EINVAL; + } /* * Check for BARs. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index df15bb6631cc..17d9277e33ef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -482,6 +482,7 @@ struct mlx4_vport_state { u8 default_qos; u32 tx_rate; bool spoofchk; + u32 link_state; }; struct mlx4_vf_admin_state { @@ -570,6 +571,25 @@ struct mlx4_cmd { u8 comm_toggle; }; +enum { + MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, + MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, + MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2, +}; +struct mlx4_vf_immed_vlan_work { + struct work_struct work; + struct mlx4_priv *priv; + int flags; + int slave; + int vlan_ix; + int orig_vlan_ix; + u8 port; + u8 qos; + u16 vlan_id; + u16 orig_vlan_id; +}; + + struct mlx4_uar_table { struct mlx4_bitmap bitmap; }; @@ -1217,4 +1237,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) #define NOT_MASKED_PD_BITS 17 +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); + #endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index b1d7657b2bf5..35fb60e2320c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -96,13 +96,14 @@ /* Use the maximum between 16384 and a single page */ #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) -#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE) -/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU +#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER + +/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU * and 4K allocations) */ enum { - FRAG_SZ0 = 512 - NET_IP_ALIGN, - FRAG_SZ1 = 1024, + FRAG_SZ0 = 1536 - NET_IP_ALIGN, + FRAG_SZ1 = 4096, FRAG_SZ2 = 4096, FRAG_SZ3 = MLX4_EN_ALLOC_SIZE }; @@ -234,9 +235,10 @@ struct mlx4_en_tx_desc { #define MLX4_EN_CX3_HIGH_ID 0x1005 struct mlx4_en_rx_alloc { - struct page *page; - dma_addr_t dma; - u16 offset; + struct page *page; + dma_addr_t dma; + u32 offset; + u32 size; }; struct mlx4_en_tx_ring { @@ -290,6 +292,11 @@ struct mlx4_en_rx_ring { void *rx_info; unsigned long bytes; unsigned long packets; +#ifdef CONFIG_NET_LL_RX_POLL + unsigned long yields; + unsigned long misses; + unsigned long cleaned; +#endif unsigned long csum_ok; unsigned long csum_none; int hwtstamp_rx_filter; @@ -310,6 +317,19 @@ struct mlx4_en_cq { u16 moder_cnt; struct mlx4_cqe *buf; #define MLX4_EN_OPCODE_ERROR 0x1e + +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int state; +#define MLX4_EN_CQ_STATE_IDLE 0 +#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ +#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */ +#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL) +#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */ +#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */ +#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) +#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) + spinlock_t poll_lock; /* protects from LLS/napi conflicts */ +#endif /* CONFIG_NET_LL_RX_POLL */ }; struct mlx4_en_port_profile { @@ -421,8 +441,6 @@ struct mlx4_en_frag_info { u16 frag_prefix_size; u16 frag_stride; u16 frag_align; - u16 last_offset; - }; #ifdef CONFIG_MLX4_EN_DCB @@ -562,6 +580,115 @@ struct mlx4_mac_entry { struct rcu_head rcu; }; +#ifdef CONFIG_NET_LL_RX_POLL +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ + 
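/* cq->state holds a small CQ-ownership state machine shared by + * the NAPI poller and busy-polling sockets: a contender that finds + * the CQ already LOCKED sets a *_YIELD flag instead of spinning, + * so the current owner learns on unlock that someone wanted the + * CQ. Each CQ starts out IDLE. + */ +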
spin_lock_init(&cq->poll_lock); + cq->state = MLX4_EN_CQ_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a cq */ +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock(&cq->poll_lock); + if (cq->state & MLX4_CQ_LOCKED) { + WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); + cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; + rc = false; + } else + /* we don't care if someone yielded */ + cq->state = MLX4_EN_CQ_STATE_NAPI; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* returns true if someone tried to get the cq while napi had it */ +static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL | + MLX4_EN_CQ_STATE_NAPI_YIELD)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* called from mlx4_en_low_latency_poll() */ +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock_bh(&cq->poll_lock); + if ((cq->state & MLX4_CQ_LOCKED)) { + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring]; + + cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; + rc = false; + rx_ring->yields++; + } else + /* preserve yield marks */ + cq->state |= MLX4_EN_CQ_STATE_POLL; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* returns true if someone tried to get the cq while it was locked */ +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock_bh(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +{ + WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); + return cq->state & CQ_USER_PEND; +} +#else +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ +} + +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + return true; +} + +static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +{ + return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) void mlx4_en_update_loopback_state(struct net_device *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1157f028a90f..f984a89c27df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -101,6 +101,8 @@ struct res_qp { spinlock_t mcg_spl; int local_qpn; atomic_t ref_count; + u32 qpc_flags; + u8 sched_queue; }; enum res_mtt_states { @@ -355,7 +357,7 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, - u8 slave) + u8 slave, u32 qpn) { struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; @@ -369,12 +371,30 @@ static int
update_vport_qp_param(struct mlx4_dev *dev, if (MLX4_VGT != vp_oper->state.default_vlan) { qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; - if (MLX4_QP_ST_RC == qp_type) + if (MLX4_QP_ST_RC == qp_type || + (MLX4_QP_ST_UD == qp_type && + !mlx4_is_qp_reserved(dev, qpn))) return -EINVAL; + /* the reserved QPs (special, proxy, tunnel) + * do not operate over vlans + */ + if (mlx4_is_qp_reserved(dev, qpn)) + return 0; + /* force strip vlan by clear vsd */ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); - if (0 != vp_oper->state.default_vlan) { + + if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } else if (0 != vp_oper->state.default_vlan) { qpc->pri_path.vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | @@ -2114,6 +2134,8 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, if (err) return err; qp->local_qpn = local_qpn; + qp->sched_queue = 0; + qp->qpc_flags = be32_to_cpu(qpc->flags); err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) @@ -2836,6 +2858,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, { int err; struct mlx4_qp_context *qpc = inbox->buf + 8; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + u8 orig_sched_queue; err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); if (err) @@ -2844,11 +2869,30 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, update_pkey_index(dev, slave, inbox); update_gid(dev, inbox, (u8)slave); adjust_proxy_tun_qkey(dev, vhcr, qpc); - err = update_vport_qp_param(dev, inbox, slave); + orig_sched_queue = qpc->pri_path.sched_queue; + err = update_vport_qp_param(dev, inbox, slave, qpn); if (err) return err; - return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + /* if no error, save sched queue value passed in by VF. This is + * essentially the QOS value provided by the VF. 
This will be useful + * if we allow dynamic changes from VST back to VGT + */ + if (!err) + qp->sched_queue = orig_sched_queue; + + put_res(dev, slave, qpn, RES_QP); + return err; } int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, @@ -3932,3 +3976,112 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) rem_slave_xrcdns(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } + +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) +{ + struct mlx4_vf_immed_vlan_work *work = + container_of(_work, struct mlx4_vf_immed_vlan_work, work); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *upd_context; + struct mlx4_dev *dev = &work->priv->dev; + struct mlx4_resource_tracker *tracker = + &work->priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[work->slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); + + int err; + int port, errors = 0; + u8 vlan_control; + + if (mlx4_is_slave(dev)) { + mlx4_warn(dev, "Trying to update-qp in slave %d\n", + work->slave); + goto out; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto out; + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */ + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else if (!work->vlan_id) + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + + upd_context = mailbox->buf; + upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask); + upd_context->qp_context.pri_path.vlan_control = vlan_control; + upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (qp->com.owner == work->slave) { + if (qp->com.from_state != RES_QP_HW || + !qp->sched_queue || /* no INIT2RTR trans yet */ + mlx4_is_qp_reserved(dev, qp->local_qpn) || + qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + port = (qp->sched_queue >> 6 & 1) + 1; + if (port != work->port) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue & 0xC7; + upd_context->qp_context.pri_path.sched_queue |= + ((work->qos & 0x7) << 3); + + err = mlx4_cmd(dev, mailbox->dma, + qp->local_qpn & 0xffffff, + 0, MLX4_CMD_UPDATE_QP, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + if (err) { + mlx4_info(dev, "UPDATE_QP failed for slave %d, " + "port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, + err); + errors++; + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_free_cmd_mailbox(dev, mailbox); + + if 
(errors) + mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n", + errors, work->slave, work->port); + + /* unregister previous vlan_id if needed and we had no errors + * while updating the QPs + */ + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && + NO_INDX != work->orig_vlan_ix) + __mlx4_unregister_vlan(&work->priv->dev, work->port, + work->orig_vlan_ix); +out: + kfree(work); + return; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig new file mode 100644 index 000000000000..21962828925a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -0,0 +1,18 @@ +# +# Mellanox driver configuration +# + +config MLX5_CORE + tristate + depends on PCI && X86 + default n + +config MLX5_DEBUG + bool "Verbose debugging output" if (MLX5_CORE && EXPERT) + depends on MLX5_CORE + default y + ---help--- + This option causes debugging code to be compiled into the + mlx5_core driver. The output can be turned on via the + debug_mask module parameter (which can also be set after + the driver is loaded through sysfs). diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile new file mode 100644 index 000000000000..105780bb980b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_MLX5_CORE) += mlx5_core.o + +mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ + health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + mad.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c new file mode 100644 index 000000000000..b215742b842f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/export.h> +#include <linux/bitmap.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> +#include <linux/mlx5/driver.h> + +#include "mlx5_core.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, + struct mlx5_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) + pages[i] = virt_to_page(buf->page_list[i].buf); + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + mlx5_buf_free(dev, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(mlx5_buf_alloc); + +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) +{ + int i; + + if (buf->nbufs == 1) + dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + else { + if (BITS_PER_LONG == 64 && buf->direct.buf) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(mlx5_buf_free); + +static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) +{ + struct mlx5_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + if (!pgdir) + return NULL; + + bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); + pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, + &pgdir->db_dma, GFP_KERNEL); + if (!pgdir->db_page) { + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, + struct mlx5_db *db) +{ + int offset; + int i; + + i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); + if (i >= MLX5_DB_PER_PAGE) + return -ENOMEM; + + __clear_bit(i, pgdir->bitmap); + + db->u.pgdir = pgdir; + db->index = i; + offset = db->index * L1_CACHE_BYTES; + db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); + db->dma = pgdir->db_dma + offset; + + return 0; +} + +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + struct mlx5_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&dev->priv.pgdir_mutex); + + list_for_each_entry(pgdir, 
&dev->priv.pgdir_list, list) + if (!mlx5_alloc_db_from_pgdir(pgdir, db)) + goto out; + + pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev)); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + list_add(&pgdir->list, &dev->priv.pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db)); + +out: + mutex_unlock(&dev->priv.pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx5_db_alloc); + +void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + mutex_lock(&dev->priv.pgdir_mutex); + + __set_bit(db->index, db->u.pgdir->bitmap); + + if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { + dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + kfree(db->u.pgdir); + } + + mutex_unlock(&dev->priv.pgdir_mutex); +} +EXPORT_SYMBOL_GPL(mlx5_db_free); + + +void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) +{ + u64 addr; + int i; + + for (i = 0; i < buf->npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << buf->page_shift); + else + addr = buf->page_list[i].map; + + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(mlx5_fill_page_array); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c new file mode 100644 index 000000000000..205753a04cfc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -0,0 +1,1515 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <asm-generic/kmap_types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/random.h> +#include <linux/io-mapping.h> +#include <linux/mlx5/driver.h> +#include <linux/debugfs.h> + +#include "mlx5_core.h" + +enum { + CMD_IF_REV = 3, +}; + +enum { + CMD_MODE_POLLING, + CMD_MODE_EVENTS +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + MLX5_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, +}; + +enum { + MLX5_CMD_DELIVERY_STAT_OK = 0x0, + MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, + MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, + MLX5_CMD_STAT_BAD_OP_ERR = 0x2, + MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, + MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + MLX5_CMD_STAT_BAD_RES_ERR = 0x5, + MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_LIM_ERR = 0x8, + MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + MLX5_CMD_STAT_IX_ERR = 0xa, + MLX5_CMD_STAT_NO_RES_ERR = 0xf, + MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, + MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, + struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, + mlx5_cmd_cbk_t cbk, + void *context, int page_queue) +{ + gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; + struct mlx5_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), alloc_flags); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->callback = cbk; + ent->context = context; + ent->cmd = cmd; + ent->page_queue = page_queue; + + return ent; +} + +static u8 alloc_token(struct mlx5_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + token = cmd->token++ % 255 + 1; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int alloc_ent(struct mlx5_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return ret < cmd->max_reg_cmds ? 
ret : -ENOMEM; +} + +static void free_ent(struct mlx5_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static u8 xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int verify_block_sig(struct mlx5_cmd_prot_block *block) +{ + if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token) +{ + block->token = token; + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); +} + +static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token) +{ + struct mlx5_cmd_mailbox *next = msg->next; + + while (next) { + calc_block_sig(next->buf, token); + next = next->next; + } +} + +static void set_signature(struct mlx5_cmd_work_ent *ent) +{ + ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); + calc_chain_sig(ent->in, ent->token); + calc_chain_sig(ent->out, ent->token); +} + +static void poll_timeout(struct mlx5_cmd_work_ent *ent) +{ + unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); + u8 own; + + do { + own = ent->lay->status_own; + if (!(own & CMD_OWNER_HW)) { + ent->ret = 0; + return; + } + usleep_range(5000, 10000); + } while (time_before(jiffies, poll_end)); + + ent->ret = -ETIMEDOUT; +} + +static void free_cmd(struct mlx5_cmd_work_ent *ent) +{ + kfree(ent); +} + + +static int verify_signature(struct mlx5_cmd_work_ent *ent) +{ + struct mlx5_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xor8_buf(ent->lay, sizeof(*ent->lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; +} + +static void dump_buf(void *buf, int size, int data_only, int offset) +{ + __be32 *p = buf; + int i; + + for (i = 0; i < size; i += 16) { + pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), + be32_to_cpu(p[3])); + p += 4; + offset += 16; + } + if (!data_only) + pr_debug("\n"); +} + +const char *mlx5_command_str(int command) +{ + switch (command) { + case MLX5_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case MLX5_CMD_OP_SET_HCA_CAP: + return "SET_HCA_CAP"; + + case MLX5_CMD_OP_QUERY_ADAPTER: + return "QUERY_ADAPTER"; + + case MLX5_CMD_OP_INIT_HCA: + return "INIT_HCA"; + + case MLX5_CMD_OP_TEARDOWN_HCA: + return "TEARDOWN_HCA"; + + case MLX5_CMD_OP_QUERY_PAGES: + return "QUERY_PAGES"; + + case MLX5_CMD_OP_MANAGE_PAGES: + return "MANAGE_PAGES"; + + case MLX5_CMD_OP_CREATE_MKEY: + return "CREATE_MKEY"; + + case MLX5_CMD_OP_QUERY_MKEY: + return "QUERY_MKEY"; + + case MLX5_CMD_OP_DESTROY_MKEY: + return "DESTROY_MKEY"; + + case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: + return "QUERY_SPECIAL_CONTEXTS"; + + case MLX5_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case MLX5_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case MLX5_CMD_OP_QUERY_EQ: + return "QUERY_EQ"; + + case MLX5_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case MLX5_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case 
MLX5_CMD_OP_QUERY_CQ: + return "QUERY_CQ"; + + case MLX5_CMD_OP_MODIFY_CQ: + return "MODIFY_CQ"; + + case MLX5_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case MLX5_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case MLX5_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case MLX5_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + case MLX5_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case MLX5_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case MLX5_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case MLX5_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case MLX5_CMD_OP_RTS2SQD_QP: + return "RTS2SQD_QP"; + + case MLX5_CMD_OP_SQD2RTS_QP: + return "SQD2RTS_QP"; + + case MLX5_CMD_OP_2RST_QP: + return "2RST_QP"; + + case MLX5_CMD_OP_QUERY_QP: + return "QUERY_QP"; + + case MLX5_CMD_OP_CONF_SQP: + return "CONF_SQP"; + + case MLX5_CMD_OP_MAD_IFC: + return "MAD_IFC"; + + case MLX5_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case MLX5_CMD_OP_SUSPEND_QP: + return "SUSPEND_QP"; + + case MLX5_CMD_OP_UNSUSPEND_QP: + return "UNSUSPEND_QP"; + + case MLX5_CMD_OP_SQD2SQD_QP: + return "SQD2SQD_QP"; + + case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET: + return "ALLOC_QP_COUNTER_SET"; + + case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET: + return "DEALLOC_QP_COUNTER_SET"; + + case MLX5_CMD_OP_QUERY_QP_COUNTER_SET: + return "QUERY_QP_COUNTER_SET"; + + case MLX5_CMD_OP_CREATE_PSV: + return "CREATE_PSV"; + + case MLX5_CMD_OP_DESTROY_PSV: + return "DESTROY_PSV"; + + case MLX5_CMD_OP_QUERY_PSV: + return "QUERY_PSV"; + + case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE: + return "QUERY_SIG_RULE_TABLE"; + + case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE: + return "QUERY_BLOCK_SIZE_TABLE"; + + case MLX5_CMD_OP_CREATE_SRQ: + return "CREATE_SRQ"; + + case MLX5_CMD_OP_DESTROY_SRQ: + return "DESTROY_SRQ"; + + case MLX5_CMD_OP_QUERY_SRQ: + return "QUERY_SRQ"; + + case MLX5_CMD_OP_ARM_RQ: + return "ARM_RQ"; + + case MLX5_CMD_OP_RESIZE_SRQ: + return "RESIZE_SRQ"; + + case MLX5_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; + + case MLX5_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; + + case MLX5_CMD_OP_ALLOC_UAR: + return "ALLOC_UAR"; + + case MLX5_CMD_OP_DEALLOC_UAR: + return "DEALLOC_UAR"; + + case MLX5_CMD_OP_ATTACH_TO_MCG: + return "ATTACH_TO_MCG"; + + case MLX5_CMD_OP_DETACH_FROM_MCG: + return "DETACH_FROM_MCG"; + + case MLX5_CMD_OP_ALLOC_XRCD: + return "ALLOC_XRCD"; + + case MLX5_CMD_OP_DEALLOC_XRCD: + return "DEALLOC_XRCD"; + + case MLX5_CMD_OP_ACCESS_REG: + return "MLX5_CMD_OP_ACCESS_REG"; + + default: return "unknown command opcode"; + } +} + +static void dump_command(struct mlx5_core_dev *dev, + struct mlx5_cmd_work_ent *ent, int input) +{ + u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); + struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; + struct mlx5_cmd_mailbox *next = msg->next; + int data_only; + int offset = 0; + int dump_len; + + data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); + + if (data_only) + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, + "dump command data %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? "INPUT" : "OUTPUT"); + else + mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? 
"INPUT" : "OUTPUT"); + + if (data_only) { + if (input) { + dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); + offset += sizeof(ent->lay->in); + } else { + dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); + offset += sizeof(ent->lay->out); + } + } else { + dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); + offset += sizeof(*ent->lay); + } + + while (next && offset < msg->len) { + if (data_only) { + dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); + dump_buf(next->buf, dump_len, 1, offset); + offset += MLX5_CMD_DATA_BLOCK_SIZE; + } else { + mlx5_core_dbg(dev, "command block:\n"); + dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); + offset += sizeof(struct mlx5_cmd_prot_block); + } + next = next->next; + } + + if (data_only) + pr_debug("\n"); +} + +static void cmd_work_handler(struct work_struct *work) +{ + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); + struct mlx5_cmd *cmd = ent->cmd; + struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); + struct mlx5_cmd_layout *lay; + struct semaphore *sem; + + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { + ent->idx = alloc_ent(cmd); + if (ent->idx < 0) { + mlx5_core_err(dev, "failed to allocate command entry\n"); + up(sem); + return; + } + } else { + ent->idx = cmd->max_reg_cmds; + } + + ent->token = alloc_token(cmd); + cmd->ent_arr[ent->idx] = ent; + lay = get_inst(cmd, ent->idx); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + if (ent->in->next) + lay->in_ptr = cpu_to_be64(ent->in->next->dma); + lay->inlen = cpu_to_be32(ent->in->len); + if (ent->out->next) + lay->out_ptr = cpu_to_be64(ent->out->next->dma); + lay->outlen = cpu_to_be32(ent->out->len); + lay->type = MLX5_PCI_CMD_XPORT; + lay->token = ent->token; + lay->status_own = CMD_OWNER_HW; + if (!cmd->checksum_disabled) + set_signature(ent); + dump_command(dev, ent, 1); + ktime_get_ts(&ent->ts1); + + /* ring doorbell after the descriptor is valid */ + wmb(); + iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); + mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx); + mmiowb(); + if (cmd->mode == CMD_MODE_POLLING) { + poll_timeout(ent); + /* make sure we read the descriptor after ownership is SW */ + rmb(); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + } +} + +static const char *deliv_status_to_str(u8 status) +{ + switch (status) { + case MLX5_CMD_DELIVERY_STAT_OK: + return "no errors"; + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: + return "signature error"; + case MLX5_CMD_DELIVERY_STAT_TOK_ERR: + return "token error"; + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + return "bad block number"; + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + return "output pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return "input pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_FW_ERR: + return "firmware internal error"; + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + return "command input length error"; + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + return "command ouput length error"; + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return "reserved fields not cleared"; + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + return "bad command descriptor type"; + default: + return "unknown status code"; + } +} + +static u16 msg_to_opcode(struct mlx5_cmd_msg *in) +{ + struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr 
*)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} +
+static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); + struct mlx5_cmd *cmd = &dev->cmd; + int err; + + if (cmd->mode == CMD_MODE_POLLING) { + wait_for_completion(&ent->done); + err = ent->ret; + } else { + if (!wait_for_completion_timeout(&ent->done, timeout)) + err = -ETIMEDOUT; + else + err = 0; + } + if (err == -ETIMEDOUT) { + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + } + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, + deliv_status_to_str(ent->status), ent->status); + + return err; +} +
+/* Notes: + * 1. Callback functions may not sleep + * 2. page queue commands do not support asynchronous completion + */ +static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback, + void *context, int page_queue, u8 *status) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + ktime_t t1, t2, delta; + struct mlx5_cmd_stats *stats; + int err = 0; + s64 ds; + u16 op; + + if (callback && page_queue) + return -EINVAL; + + ent = alloc_cmd(cmd, in, out, callback, context, page_queue); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + if (!callback) + init_completion(&ent->done); + + INIT_WORK(&ent->work, cmd_work_handler); + if (page_queue) { + cmd_work_handler(&ent->work); + } else if (!queue_work(cmd->wq, &ent->work)) { + mlx5_core_warn(dev, "failed to queue work\n"); + err = -ENOMEM; + goto out_free; + } + + if (!callback) { + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out; + + t1 = timespec_to_ktime(ent->ts1); + t2 = timespec_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock(&stats->lock); + } + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + mlx5_command_str(op), ds); + *status = ent->status; + free_cmd(ent); + } + + return err; + +out_free: + free_cmd(ent); +out: + return err; +}
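+ +/* A minimal usage sketch (hypothetical function, not part of this patch): + * callers reach mlx5_cmd_invoke() through mlx5_cmd_exec() with an + * inbox/outbox pair, as the CQ and EQ code later in this series does. + */ +#if 0 +static int example_query_adapter(struct mlx5_core_dev *dev) +{ + struct mlx5_inbox_hdr in; /* real commands use per-command mailboxes */ + struct mlx5_outbox_hdr out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + return mlx5_cmd_status_to_err(&out); +} +#endif + +static ssize_t dbg_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char lbuf[3]; + int err; + + if (!dbg->in_msg || !dbg->out_msg) + return -ENOMEM; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EFAULT; + + lbuf[sizeof(lbuf) - 1] = 0; + + if (strcmp(lbuf, "go")) + return -EINVAL; + + err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); + + return err ? 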
err : count; +} + + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dbg_write, +}; + +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(to->first.data)); + memcpy(to->first.data, from, copy); + size -= copy; + from += copy; + + next = to->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + memcpy(block->data, from, copy); + from += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(from->first.data)); + memcpy(to, from->first.data, copy); + size -= copy; + to += copy; + + next = from->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + memcpy(to, block->data, copy); + to += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, + gfp_t flags) +{ + struct mlx5_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof(*mailbox), flags); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, + &mailbox->dma); + if (!mailbox->buf) { + mlx5_core_dbg(dev, "failed allocation\n"); + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); + mailbox->next = NULL; + + return mailbox; +} + +static void free_cmd_box(struct mlx5_core_dev *dev, + struct mlx5_cmd_mailbox *mailbox) +{ + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, + gfp_t flags, int size) +{ + struct mlx5_cmd_mailbox *tmp, *head = NULL; + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(dev, flags); + if (IS_ERR(tmp)) { + mlx5_core_warn(dev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(dev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg) +{ + struct mlx5_cmd_mailbox *head = msg->next; + struct mlx5_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(dev, head); + head = next; + } + kfree(msg); +} + +static ssize_t data_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + void *ptr; + int err; + + if (*pos != 0) + return -EINVAL; + + kfree(dbg->in_msg); + dbg->in_msg = NULL; + dbg->inlen = 0; + + ptr = kzalloc(count, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (copy_from_user(ptr, buf, count)) { + err = -EFAULT; + goto out; + } + dbg->in_msg = ptr; + dbg->inlen = count; + + *pos = count; + + return count; + +out: + kfree(ptr); + return err; +} + +static ssize_t data_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int copy; + + if (*pos) + return 0; + + if (!dbg->out_msg) + return -ENOMEM; + + copy = min_t(int, count, dbg->outlen); + if (copy_to_user(buf, dbg->out_msg, copy)) + return -EFAULT; + + *pos += copy; + + return copy; +} + +static const struct file_operations dfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, +}; + +static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen[8]; + int err; + + if (*pos) + return 0; + + err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); + if (err < 0) + return err; + + if (copy_to_user(buf, &outlen, err)) + return -EFAULT; + + *pos += err; + + return err; +} + +static ssize_t outlen_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen_str[8]; + int outlen; + void *ptr; + int err; + + if (*pos != 0 || count > 6) + return -EINVAL; + + kfree(dbg->out_msg); + dbg->out_msg = NULL; + dbg->outlen = 0; + + if (copy_from_user(outlen_str, buf, count)) + return -EFAULT; + + outlen_str[7] = 0; + + err = sscanf(outlen_str, "%d", &outlen); + if (err < 0) + return err; + + ptr = kzalloc(outlen, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + dbg->out_msg = ptr; + dbg->outlen = outlen; + + *pos = count; + + return count; +} + +static const struct file_operations olfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = outlen_write, + .read = outlen_read, +}; + +static void set_wqname(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", + dev_name(&dev->pdev->dev)); +} + +static void clean_debug_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + + if (!mlx5_debugfs_root) + return; + + mlx5_cmdif_debugfs_cleanup(dev); + debugfs_remove_recursive(dbg->dbg_root); +} + +static int create_debugfs_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int err = -ENOMEM; + + if 
(!mlx5_debugfs_root) + return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); + if (!dbg->dbg_root) + return err; + + dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_in) + goto err_dbg; + + dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_out) + goto err_dbg; + + dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, + dev, &olfops); + if (!dbg->dbg_outlen) + goto err_dbg; + + dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root, + &dbg->status); + if (!dbg->dbg_status) + goto err_dbg; + + dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); + if (!dbg->dbg_run) + goto err_dbg; + + mlx5_cmdif_debugfs_init(dev); + + return 0; + +err_dbg: + clean_debug_files(dev); + return err; +} + +void mlx5_cmd_use_events(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + + cmd->mode = CMD_MODE_EVENTS; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + cmd->mode = CMD_MODE_POLLING; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + mlx5_cmd_cbk_t callback; + void *context; + int err; + int i; + + for (i = 0; i < (1 << cmd->log_sz); i++) { + if (test_bit(i, &vector)) { + ent = cmd->ent_arr[i]; + ktime_get_ts(&ent->ts2); + memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); + dump_command(dev, ent, 0); + if (!ent->ret) { + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); + else + ent->ret = 0; + ent->status = ent->lay->status_own >> 1; + mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", + ent->ret, deliv_status_to_str(ent->status), ent->status); + } + free_ent(cmd, ent->idx); + if (ent->callback) { + callback = ent->callback; + context = ent->context; + err = ent->ret; + free_cmd(ent); + callback(err, context); + } else { + complete(&ent->done); + } + if (ent->page_queue) + up(&cmd->pages_sem); + else + up(&cmd->sem); + } + } +} +EXPORT_SYMBOL(mlx5_cmd_comp_handler); + +static int status_to_err(u8 status) +{ + return status ? 
-1 : 0; /* TBD more meaningful codes */ +} + +static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) +{ + struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct mlx5_cmd *cmd = &dev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock(&ent->lock); + } + + if (IS_ERR(msg)) + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size); + + return msg; +} + +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) +{ + if (msg->cache) { + spin_lock(&msg->cache->lock); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock(&msg->cache->lock); + } else { + mlx5_free_cmd_msg(dev, msg); + } +} + +static int is_manage_pages(struct mlx5_inbox_hdr *in) +{ + return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; +} + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size) +{ + struct mlx5_cmd_msg *inb; + struct mlx5_cmd_msg *outb; + int pages_queue; + int err; + u8 status = 0; + + pages_queue = is_manage_pages(in); + + inb = alloc_msg(dev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = mlx5_copy_to_msg(inb, in, in_size); + if (err) { + mlx5_core_warn(dev, "err %d\n", err); + goto out_in; + } + + outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status); + if (err) + goto out_out; + + mlx5_core_dbg(dev, "err %d, status %d\n", err, status); + if (status) { + err = status_to_err(status); + goto out_out; + } + + err = mlx5_copy_from_msg(out, outb, out_size); + +out_out: + mlx5_free_cmd_msg(dev, outb); + +out_in: + free_msg(dev, inb); + return err; +} +EXPORT_SYMBOL(mlx5_cmd_exec); + +static void destroy_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + struct mlx5_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } +} + +static int create_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); + INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +ex_err: + destroy_msg_cache(dev); + return err; +} + +int mlx5_cmd_init(struct mlx5_core_dev *dev) +{ + int size = sizeof(struct 
mlx5_cmd_prot_block); + int align = roundup_pow_of_two(size); + struct mlx5_cmd *cmd = &dev->cmd; + u32 cmd_h, cmd_l; + u16 cmd_if_rev; + int err; + int i; + + cmd_if_rev = cmdif_rev(dev); + if (cmd_if_rev != CMD_IF_REV) { + dev_err(&dev->pdev->dev, + "Driver cmdif rev(%d) differs from firmware's(%d)\n", + CMD_IF_REV, cmd_if_rev); + return -EINVAL; + } + + cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); + if (!cmd->pool) + return -ENOMEM; + + cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); + if (!cmd->cmd_buf) { + err = -ENOMEM; + goto err_free_pool; + } + cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) { + err = -ENOMEM; + goto err_free; + } + + cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; + cmd->log_sz = cmd_l >> 4 & 0xf; + cmd->log_stride = cmd_l & 0xf; + if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { + dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n", + 1 << cmd->log_sz); + err = -EINVAL; + goto err_map; + } + + if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) { + dev_err(&dev->pdev->dev, "command queue size overflow\n"); + err = -EINVAL; + goto err_map; + } + + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; + cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + if (cmd->cmdif_rev > CMD_IF_REV) { + dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", + CMD_IF_REV, cmd->cmdif_rev); + err = -ENOTSUPP; + goto err_map; + } + + spin_lock_init(&cmd->alloc_lock); + spin_lock_init(&cmd->token_lock); + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) + spin_lock_init(&cmd->stats[i].lock); + + sema_init(&cmd->sem, cmd->max_reg_cmds); + sema_init(&cmd->pages_sem, 1); + + cmd_h = (u32)((u64)(cmd->dma) >> 32); + cmd_l = (u32)(cmd->dma); + if (cmd_l & 0xfff) { + dev_err(&dev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_map; + } + + iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); + iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); + + /* Make sure firmware sees the complete address before we proceed */ + wmb(); + + mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); + + cmd->mode = CMD_MODE_POLLING; + + err = create_msg_cache(dev); + if (err) { + dev_err(&dev->pdev->dev, "failed to create command cache\n"); + goto err_map; + } + + set_wqname(dev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + dev_err(&dev->pdev->dev, "failed to create command workqueue\n"); + err = -ENOMEM; + goto err_cache; + } + + err = create_debugfs_files(dev); + if (err) { + err = -ENOMEM; + goto err_wq; + } + + return 0; + +err_wq: + destroy_workqueue(cmd->wq); + +err_cache: + destroy_msg_cache(dev); + +err_map: + dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); +err_free: + free_pages((unsigned long)cmd->cmd_buf, 0); + +err_free_pool: + pci_pool_destroy(cmd->pool); + + return err; +} +EXPORT_SYMBOL(mlx5_cmd_init); + +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + clean_debug_files(dev); + destroy_workqueue(cmd->wq); + destroy_msg_cache(dev); + dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cmd_buf, 0); + pci_pool_destroy(cmd->pool); +} +EXPORT_SYMBOL(mlx5_cmd_cleanup); + +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case 
MLX5_CMD_STAT_OK: + return "OK"; + case MLX5_CMD_STAT_INT_ERR: + return "internal error"; + case MLX5_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case MLX5_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case MLX5_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case MLX5_CMD_STAT_RES_BUSY: + return "resource busy"; + case MLX5_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case MLX5_CMD_STAT_IX_ERR: + return "bad index"; + case MLX5_CMD_STAT_NO_RES_ERR: + return "no resources"; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case MLX5_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(hdr->status), hdr->status, + be32_to_cpu(hdr->syndrome)); + + switch (hdr->status) { + case MLX5_CMD_STAT_OK: return 0; + case MLX5_CMD_STAT_INT_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_LIM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_IX_ERR: return -EINVAL; + case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c new file mode 100644 index 000000000000..c2d660be6f76 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +
+#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/hardirq.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <rdma/ib_verbs.h> +#include <linux/mlx5/cq.h> +#include "mlx5_core.h" +
+void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) +{ + struct mlx5_core_cq *cq; + struct mlx5_cq_table *table = &dev->priv.cq_table; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} +
+void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_core_cq *cq; + + spin_lock(&table->lock); + + cq = radix_tree_lookup(&table->tree, cqn); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_create_cq_mbox_in *in, int inlen) +{ + int err; + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_create_cq_mbox_out out; + struct mlx5_destroy_cq_mbox_in din; + struct mlx5_destroy_cq_mbox_out dout; + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); + memset(&out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + cq->cons_index = 0; + cq->arm_sn = 0; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + if (err) + goto err_cmd; + + cq->pid = current->pid; + err = mlx5_debug_cq_add(dev, cq); + if (err) + mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n", + cq->cqn); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_cq);
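+ +/* A minimal sketch (hypothetical names, not part of this patch): the + * reference taken in mlx5_cq_completion()/mlx5_cq_event() together with + * cq->free is what lets mlx5_core_destroy_cq() below wait out handlers + * still running on another CPU. A caller wires up its handlers before + * creating the CQ; cq->event would be set the same way. + */ +#if 0 +static void example_comp(struct mlx5_core_cq *cq) +{ + /* called from mlx5_cq_completion() in interrupt context; must not sleep */ +} + +static int example_create(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_create_cq_mbox_in *in, int inlen) +{ + cq->comp = example_comp; + return mlx5_core_create_cq(dev, cq, in, inlen); +} +#endif + +int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_destroy_cq_mbox_in in; + struct mlx5_destroy_cq_mbox_out out; + struct mlx5_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); + return -EINVAL; + } + if (tmp != cq) { + mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + 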
if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + synchronize_irq(cq->irqn); + + mlx5_debug_cq_remove(dev, cq); + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_destroy_cq); + +int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_query_cq_mbox_out *out) +{ + struct mlx5_query_cq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_cq); + + +int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int type, struct mlx5_cq_modify_params *params) +{ + return -ENOSYS; +} + +int mlx5_init_cq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + int err; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + err = mlx5_cq_debugfs_init(dev); + + return err; +} + +void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) +{ + mlx5_cq_debugfs_cleanup(dev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c new file mode 100644 index 000000000000..9c7194b26ee2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -0,0 +1,583 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/cq.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" + +enum { + QP_PID, + QP_STATE, + QP_XPORT, + QP_MTU, + QP_N_RECV, + QP_RECV_SZ, + QP_N_SEND, + QP_LOG_PG_SZ, + QP_RQPN, +}; + +static char *qp_fields[] = { + [QP_PID] = "pid", + [QP_STATE] = "state", + [QP_XPORT] = "transport", + [QP_MTU] = "mtu", + [QP_N_RECV] = "num_recv", + [QP_RECV_SZ] = "rcv_wqe_sz", + [QP_N_SEND] = "num_send", + [QP_LOG_PG_SZ] = "log2_page_sz", + [QP_RQPN] = "remote_qpn", +}; + +enum { + EQ_NUM_EQES, + EQ_INTR, + EQ_LOG_PG_SZ, +}; + +static char *eq_fields[] = { + [EQ_NUM_EQES] = "num_eqes", + [EQ_INTR] = "intr", + [EQ_LOG_PG_SZ] = "log_page_size", +}; + +enum { + CQ_PID, + CQ_NUM_CQES, + CQ_LOG_PG_SZ, +}; + +static char *cq_fields[] = { + [CQ_PID] = "pid", + [CQ_NUM_CQES] = "num_cqes", + [CQ_LOG_PG_SZ] = "log_page_size", +}; + +struct dentry *mlx5_debugfs_root; +EXPORT_SYMBOL(mlx5_debugfs_root); + +void mlx5_register_debugfs(void) +{ + mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL); + if (IS_ERR_OR_NULL(mlx5_debugfs_root)) + mlx5_debugfs_root = NULL; +} + +void mlx5_unregister_debugfs(void) +{ + debugfs_remove(mlx5_debugfs_root); +} + +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + atomic_set(&dev->num_qps, 0); + + dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); + if (!dev->priv.qp_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.qp_debugfs); +} + +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); + if (!dev->priv.eq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.eq_debugfs); +} + +static ssize_t average_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + u64 field = 0; + int ret; + char tbuf[22]; + + if (*pos) + return 0; + + stats = filp->private_data; + spin_lock(&stats->lock); + if (stats->n) + field = div64_u64(stats->sum, stats->n); + spin_unlock(&stats->lock); + ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); + if (ret > 0) { + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; + } + + *pos += ret; + return ret; +} + + +static ssize_t average_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + + stats = filp->private_data; + spin_lock(&stats->lock); + stats->sum = 0; + stats->n = 0; + spin_unlock(&stats->lock); + + *pos += count; + + return count; +} + +static const struct file_operations stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = average_read, + .write = average_write, +}; + +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_stats *stats; + struct dentry **cmd; + const char *namep; + int err; + int i; + + if (!mlx5_debugfs_root) + return 0; + + cmd = &dev->priv.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); + if (!*cmd) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { + stats = &dev->cmd.stats[i]; + namep = mlx5_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root = 
debugfs_create_dir(namep, *cmd); + if (!stats->root) { + mlx5_core_warn(dev, "failed adding command %d\n", + i); + err = -ENOMEM; + goto out; + } + + stats->avg = debugfs_create_file("average", 0400, + stats->root, stats, + &stats_fops); + if (!stats->avg) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + + stats->count = debugfs_create_u64("n", 0400, + stats->root, + &stats->n); + if (!stats->count) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + } + } + + return 0; +out: + debugfs_remove_recursive(dev->priv.cmdif_debugfs); + return err; +} + +void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cmdif_debugfs); +} + +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); + if (!dev->priv.cq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cq_debugfs); +} + +static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + int index) +{ + struct mlx5_query_qp_mbox_out *out; + struct mlx5_qp_context *ctx; + u64 param = 0; + int err; + int no_sq; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query qp\n"); + goto out; + } + + ctx = &out->ctx; + switch (index) { + case QP_PID: + param = qp->pid; + break; + case QP_STATE: + param = be32_to_cpu(ctx->flags) >> 28; + break; + case QP_XPORT: + param = (be32_to_cpu(ctx->flags) >> 16) & 0xff; + break; + case QP_MTU: + param = ctx->mtu_msgmax >> 5; + break; + case QP_N_RECV: + param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); + break; + case QP_RECV_SZ: + param = 1 << ((ctx->rq_size_stride & 7) + 4); + break; + case QP_N_SEND: + no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15; + if (!no_sq) + param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11); + else + param = 0; + break; + case QP_LOG_PG_SZ: + param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f; + param += 12; + break; + case QP_RQPN: + param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff; + break; + } + +out: + kfree(out); + return param; +} + +static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + int index) +{ + struct mlx5_query_eq_mbox_out *out; + struct mlx5_eq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_eq_query(dev, eq, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query eq\n"); + goto out; + } + + switch (index) { + case EQ_NUM_EQES: + param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case EQ_INTR: + param = ctx->intr; + break; + case EQ_LOG_PG_SZ: + param = (ctx->log_page_size & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int index) +{ + struct mlx5_query_cq_mbox_out *out; + struct mlx5_cq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_query_cq(dev, cq, out); + if (err) { + mlx5_core_warn(dev, "failed to query cq\n"); + goto out; + } + + switch 
(index) { + case CQ_PID: + param = cq->pid; + break; + case CQ_NUM_CQES: + param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case CQ_LOG_PG_SZ: + param = (ctx->log_pg_sz & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_field_desc *desc; + struct mlx5_rsc_debug *d; + char tbuf[18]; + u64 field; + int ret; + + if (*pos) + return 0; + + desc = filp->private_data; + d = (void *)(desc - desc->i) - sizeof(*d); + switch (d->type) { + case MLX5_DBG_RSC_QP: + field = qp_read_field(d->dev, d->object, desc->i); + break; + + case MLX5_DBG_RSC_EQ: + field = eq_read_field(d->dev, d->object, desc->i); + break; + + case MLX5_DBG_RSC_CQ: + field = cq_read_field(d->dev, d->object, desc->i); + break; + + default: + mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type); + return -EINVAL; + } + + ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + if (ret > 0) { + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; + } + + *pos += ret; + return ret; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dbg_read, +}; + +static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, + struct dentry *root, struct mlx5_rsc_debug **dbg, + int rsn, char **field, int nfile, void *data) +{ + struct mlx5_rsc_debug *d; + char resn[32]; + int err; + int i; + + d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->dev = dev; + d->object = data; + d->type = type; + sprintf(resn, "0x%x", rsn); + d->root = debugfs_create_dir(resn, root); + if (!d->root) { + err = -ENOMEM; + goto out_free; + } + + for (i = 0; i < nfile; i++) { + d->fields[i].i = i; + d->fields[i].dent = debugfs_create_file(field[i], 0400, + d->root, &d->fields[i], + &fops); + if (!d->fields[i].dent) { + err = -ENOMEM; + goto out_rem; + } + } + *dbg = d; + + return 0; +out_rem: + debugfs_remove_recursive(d->root); + +out_free: + kfree(d); + return err; +} + +static void rem_res_tree(struct mlx5_rsc_debug *d) +{ + debugfs_remove_recursive(d->root); + kfree(d); +} + +int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, + &qp->dbg, qp->qpn, qp_fields, + ARRAY_SIZE(qp_fields), qp); + if (err) + qp->dbg = NULL; + + return err; +} + +void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + if (!mlx5_debugfs_root) + return; + + if (qp->dbg) + rem_res_tree(qp->dbg); +} + + +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, + &eq->dbg, eq->eqn, eq_fields, + ARRAY_SIZE(eq_fields), eq); + if (err) + eq->dbg = NULL; + + return err; +} + +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + if (!mlx5_debugfs_root) + return; + + if (eq->dbg) + rem_res_tree(eq->dbg); +} + +int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, + &cq->dbg, cq->cqn, cq_fields, + ARRAY_SIZE(cq_fields), cq); + if (err) + cq->dbg = NULL; + + return err; +} + +void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + if 
(!mlx5_debugfs_root) + return; + + if (cq->dbg) + rem_res_tree(cq->dbg); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c new file mode 100644 index 000000000000..c02cbcfd0fb8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -0,0 +1,521 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), + MLX5_EQE_OWNER_INIT_VAL = 0x1, +}; + +enum { + MLX5_EQ_STATE_ARMED = 0x9, + MLX5_EQ_STATE_FIRED = 0xa, + MLX5_EQ_STATE_ALWAYS_ARMED = 0xb, +}; + +enum { + MLX5_NUM_SPARE_EQE = 0x80, + MLX5_NUM_ASYNC_EQE = 0x100, + MLX5_NUM_CMD_EQE = 32, +}; + +enum { + MLX5_EQ_DOORBEL_OFFSET = 0x40, +}; + +#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ + (1ull << MLX5_EVENT_TYPE_COMM_EST) | \ + (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \ + (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)) + +struct map_eq_in { + u64 mask; + u32 reserved; + u32 unmap_eqn; +}; + +struct cre_des_eq { + u8 reserved[15]; + u8 eqn; +}; + +static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) +{ + struct mlx5_destroy_eq_mbox_in in; + struct mlx5_destroy_eq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ); + in.eqn = eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (!err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} + +static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) +{ + return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); +} + +static struct mlx5_eqe 
*next_eqe_sw(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; +} + +static const char *eqe_type_str(u8 type) +{ + switch (type) { + case MLX5_EVENT_TYPE_COMP: + return "MLX5_EVENT_TYPE_COMP"; + case MLX5_EVENT_TYPE_PATH_MIG: + return "MLX5_EVENT_TYPE_PATH_MIG"; + case MLX5_EVENT_TYPE_COMM_EST: + return "MLX5_EVENT_TYPE_COMM_EST"; + case MLX5_EVENT_TYPE_SQ_DRAINED: + return "MLX5_EVENT_TYPE_SQ_DRAINED"; + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; + case MLX5_EVENT_TYPE_CQ_ERROR: + return "MLX5_EVENT_TYPE_CQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_INTERNAL_ERROR: + return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; + case MLX5_EVENT_TYPE_PORT_CHANGE: + return "MLX5_EVENT_TYPE_PORT_CHANGE"; + case MLX5_EVENT_TYPE_GPIO_EVENT: + return "MLX5_EVENT_TYPE_GPIO_EVENT"; + case MLX5_EVENT_TYPE_REMOTE_CONFIG: + return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; + case MLX5_EVENT_TYPE_DB_BF_CONGESTION: + return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; + case MLX5_EVENT_TYPE_STALL_EVENT: + return "MLX5_EVENT_TYPE_STALL_EVENT"; + case MLX5_EVENT_TYPE_CMD: + return "MLX5_EVENT_TYPE_CMD"; + case MLX5_EVENT_TYPE_PAGE_REQUEST: + return "MLX5_EVENT_TYPE_PAGE_REQUEST"; + default: + return "Unrecognized event"; + } +} + +static enum mlx5_dev_event port_subtype_event(u8 subtype) +{ + switch (subtype) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + return MLX5_DEV_EVENT_PORT_DOWN; + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + return MLX5_DEV_EVENT_PORT_UP; + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + return MLX5_DEV_EVENT_PORT_INITIALIZED; + case MLX5_PORT_CHANGE_SUBTYPE_LID: + return MLX5_DEV_EVENT_LID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + return MLX5_DEV_EVENT_PKEY_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + return MLX5_DEV_EVENT_GUID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + return MLX5_DEV_EVENT_CLIENT_REREG; + } + return -1; +} + +static void eq_update_ci(struct mlx5_eq *eq, int arm) +{ + __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); + u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + __raw_writel((__force u32) cpu_to_be32(val), addr); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + u32 cqn; + u32 srqn; + u8 port; + + while ((eqe = next_eqe_sw(eq))) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
+ */ + rmb(); + + mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); + switch (eqe->type) { + case MLX5_EVENT_TYPE_COMP: + cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; + mlx5_cq_completion(dev, cqn); + break; + + case MLX5_EVENT_TYPE_PATH_MIG: + case MLX5_EVENT_TYPE_COMM_EST: + case MLX5_EVENT_TYPE_SQ_DRAINED: + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + mlx5_core_dbg(dev, "event %s(%d) arrived\n", + eqe_type_str(eqe->type), eqe->type); + mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff, + eqe->type); + break; + + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", + eqe_type_str(eqe->type), eqe->type, srqn); + mlx5_srq_event(dev, srqn, eqe->type); + break; + + case MLX5_EVENT_TYPE_CMD: + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); + break; + + case MLX5_EVENT_TYPE_PORT_CHANGE: + port = (eqe->data.port.port >> 4) & 0xf; + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + case MLX5_PORT_CHANGE_SUBTYPE_LID: + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + dev->event(dev, port_subtype_event(eqe->sub_type), &port); + break; + default: + mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", + port, eqe->sub_type); + } + break; + case MLX5_EVENT_TYPE_CQ_ERROR: + cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; + mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", + cqn, eqe->data.cq_err.syndrome); + mlx5_cq_event(dev, cqn, eqe->type); + break; + + case MLX5_EVENT_TYPE_PAGE_REQUEST: + { + u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); + s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); + + mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages); + mlx5_core_req_pages_handler(dev, func_id, npages); + } + break; + + default: + mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX5_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. 
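
[Editor's note] To make the cadence contract above concrete (the check implementing it follows just below), here is a hedged userspace sketch of the same protocol: consume events in a batch, refresh the consumer index unarmed every SPARE entries so the device never concludes the queue overflowed, and re-arm exactly once when the queue is drained. update_ci() stands in for the eq_update_ci() doorbell write shown earlier:

#include <stdint.h>
#include <stdio.h>

#define SPARE 4 /* stands in for MLX5_NUM_SPARE_EQE */

static uint32_t ci; /* consumer index */

static void update_ci(int arm)
{
    /* in the driver this is a doorbell write of (ci & 0xffffff) | (eqn << 24) */
    printf("doorbell: ci=%u arm=%d\n", ci, arm);
}

static void poll_eq(int pending)
{
    int set_ci = 0;

    while (pending--) {
        /* ... consume one EQE here ... */
        ++ci;
        if (++set_ci >= SPARE) { /* keep the device's view of ci fresh */
            update_ci(0);
            set_ci = 0;
        }
    }
    update_ci(1); /* drained: re-arm so the next event raises an interrupt */
}

int main(void)
{
    poll_eq(10);
    return 0;
}
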
+ */ + if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + struct mlx5_core_dev *dev = eq->dev; + + mlx5_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner = MLX5_EQE_OWNER_INIT_VAL; + } +} + +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, struct mlx5_uar *uar) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_create_eq_mbox_in *in; + struct mlx5_create_eq_mbox_out out; + int err; + int inlen; + + eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); + err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, + &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_buf; + } + memset(&out, 0, sizeof(out)); + + mlx5_fill_page_array(&eq->buf, in->pas); + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); + in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); + in->ctx.intr = vecidx; + in->ctx.log_page_size = PAGE_SHIFT - 12; + in->events_mask = cpu_to_be64(mask); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + goto err_in; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto err_in; + } + + eq->eqn = out.eq_number; + err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, + name, eq); + if (err) + goto err_eq; + + eq->irqn = vecidx; + eq->dev = dev; + eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; + + err = mlx5_debug_eq_add(dev, eq); + if (err) + goto err_irq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + + mlx5_vfree(in); + return 0; + +err_irq: + free_irq(table->msix_arr[vecidx].vector, eq); + +err_eq: + mlx5_cmd_destroy_eq(dev, eq->eqn); + +err_in: + mlx5_vfree(in); + +err_buf: + mlx5_buf_free(dev, &eq->buf); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_create_map_eq); + +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + mlx5_debug_eq_remove(dev, eq); + free_irq(table->msix_arr[eq->irqn].vector, eq); + err = mlx5_cmd_destroy_eq(dev, eq->eqn); + if (err) + mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + mlx5_buf_free(dev, &eq->buf); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); + +int mlx5_eq_init(struct mlx5_core_dev *dev) +{ + int err; + + spin_lock_init(&dev->priv.eq_table.lock); + + err = mlx5_eq_debugfs_init(dev); + + return err; +} + + +void mlx5_eq_cleanup(struct mlx5_core_dev *dev) +{ + mlx5_eq_debugfs_cleanup(dev); +} + +int mlx5_start_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, + MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, + "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); + return err; + } + + mlx5_cmd_use_events(dev); + + err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, + MLX5_NUM_ASYNC_EQE, 
MLX5_ASYNC_EVENT_MASK, + "mlx5_async_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create async EQ %d\n", err); + goto err1; + } + + err = mlx5_create_map_eq(dev, &table->pages_eq, + MLX5_EQ_VEC_PAGES, + dev->caps.max_vf + 1, + 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", + &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); + goto err2; + } + + return err; + +err2: + mlx5_destroy_unmap_eq(dev, &table->async_eq); + +err1: + mlx5_cmd_use_polling(dev); + mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + return err; +} + +int mlx5_stop_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); + if (err) + return err; + + mlx5_destroy_unmap_eq(dev, &table->async_eq); + mlx5_cmd_use_polling(dev); + + err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + if (err) + mlx5_cmd_use_events(dev); + + return err; +} + +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct mlx5_query_eq_mbox_out *out, int outlen) +{ + struct mlx5_query_eq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ); + in.eqn = eq->eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c new file mode 100644 index 000000000000..72a5222447f5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
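
[Editor's note] The mlx5_start_eqs()/mlx5_stop_eqs() pair just shown encodes an ordering constraint worth spelling out: the command EQ must exist before mlx5_cmd_use_events() switches command completions from polling to interrupts, and teardown must fall back to polling before that EQ disappears. A hedged sketch of the dependency, with stand-in functions rather than the driver's API:

#include <stdio.h>

static int create_eq(const char *name) { printf("create %s EQ\n", name); return 0; }
static void destroy_eq(const char *name) { printf("destroy %s EQ\n", name); }
static void cmd_use_events(void) { printf("cmd interface: event driven\n"); }
static void cmd_use_polling(void) { printf("cmd interface: polling\n"); }

static int start_eqs(void)
{
    int err;

    err = create_eq("cmd"); /* must precede event-driven commands */
    if (err)
        return err;
    cmd_use_events();
    err = create_eq("async");
    if (err)
        goto err_cmd;
    err = create_eq("pages");
    if (err)
        goto err_async;
    return 0;

err_async:
    destroy_eq("async");
err_cmd:
    cmd_use_polling(); /* drop back to polling before the cmd EQ goes away */
    destroy_eq("cmd");
    return err;
}

int main(void) { return start_eqs(); }
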
+ */ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <linux/module.h> +#include "mlx5_core.h" + +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_query_adapter_mbox_out *out; + struct mlx5_cmd_query_adapter_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_out; + } + + memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); + +out_out: + kfree(out); + + return err; +} + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, + struct mlx5_caps *caps) +{ + struct mlx5_cmd_query_hca_cap_mbox_out *out; + struct mlx5_cmd_query_hca_cap_mbox_in in; + struct mlx5_query_special_ctxs_mbox_out ctx_out; + struct mlx5_query_special_ctxs_mbox_in ctx_in; + int err; + u16 t16; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); + in.hdr.opmod = cpu_to_be16(0x1); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_out; + } + + + caps->log_max_eq = out->hca_cap.log_max_eq & 0xf; + caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz; + caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz; + caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); + caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); + caps->flags = be64_to_cpu(out->hca_cap.flags); + caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); + caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f; + caps->num_ports = out->hca_cap.num_ports & 0xf; + caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f; + if (caps->num_ports > MLX5_MAX_PORTS) { + mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n", + caps->num_ports, MLX5_MAX_PORTS); + err = -EINVAL; + goto out_out; + } + caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f; + caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f; + caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f; + caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; + caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; + caps->log_max_mcg = out->hca_cap.log_max_mcg; + caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); + caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); + caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; + t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size); + if (t16 & 0x8000) { + caps->bf_reg_size = 1 << (t16 & 0x1f); + caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE; + } else { + caps->bf_reg_size = 0; + caps->bf_regs_per_page = 0; + } + caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1); + + memset(&ctx_in, 0, sizeof(ctx_in)); + memset(&ctx_out, 0, sizeof(ctx_out)); + ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in), + &ctx_out, sizeof(ctx_out)); + if (err) + goto out_out; + + if (ctx_out.hdr.status) + err = mlx5_cmd_status_to_err(&ctx_out.hdr); + + caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey); + +out_out: + kfree(out); + + return err; +} + +int 
mlx5_cmd_init_hca(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_init_hca_mbox_in in; + struct mlx5_cmd_init_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} + +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_teardown_hca_mbox_in in; + struct mlx5_cmd_teardown_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c new file mode 100644 index 000000000000..748f10a155c4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
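
[Editor's note] Every wrapper in fw.c follows the same mailbox shape: zero the inbox and outbox, store a big-endian opcode, execute, then translate a nonzero firmware status into an errno, because mlx5_cmd_exec() returning 0 only means the command transport worked. A self-contained sketch of that two-level error check; the types and functions below are stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr_in { uint16_t opcode_be; };
struct hdr_out { uint8_t status; };

/* Stand-in for mlx5_cmd_exec(): reports the transport-level result only. */
static int cmd_exec(void *in, size_t inlen, void *out, size_t outlen)
{
    (void)in; (void)inlen;
    memset(out, 0, outlen); /* pretend firmware answered with status 0 */
    return 0;
}

static int status_to_err(uint8_t status)
{
    return status ? -5 /* e.g. -EIO */ : 0;
}

static int simple_cmd(uint16_t opcode)
{
    struct hdr_in in;
    struct hdr_out out;
    int err;

    memset(&in, 0, sizeof(in));
    memset(&out, 0, sizeof(out));
    in.opcode_be = (uint16_t)((opcode << 8) | (opcode >> 8)); /* cpu_to_be16 on LE */
    err = cmd_exec(&in, sizeof(in), &out, sizeof(out));
    if (err)
        return err; /* transport failed */
    return status_to_err(out.status); /* firmware-reported failure */
}

int main(void)
{
    printf("rc %d\n", simple_cmd(0x0107));
    return 0;
}
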
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/vmalloc.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, + MAX_MISSES = 3, +}; + +enum { + MLX5_HEALTH_SYNDR_FW_ERR = 0x1, + MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, + MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, + MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, + MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, + MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, + MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, + MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, +}; + +static DEFINE_SPINLOCK(health_lock); + +static LIST_HEAD(health_list); +static struct work_struct health_work; + +static health_handler_t reg_handler; +int mlx5_register_health_report_handler(health_handler_t handler) +{ + spin_lock_irq(&health_lock); + if (reg_handler) { + spin_unlock_irq(&health_lock); + return -EEXIST; + } + reg_handler = handler; + spin_unlock_irq(&health_lock); + + return 0; +} +EXPORT_SYMBOL(mlx5_register_health_report_handler); + +void mlx5_unregister_health_report_handler(void) +{ + spin_lock_irq(&health_lock); + reg_handler = NULL; + spin_unlock_irq(&health_lock); +} +EXPORT_SYMBOL(mlx5_unregister_health_report_handler); + +static void health_care(struct work_struct *work) +{ + struct mlx5_core_health *health, *n; + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + LIST_HEAD(tlist); + + spin_lock_irq(&health_lock); + list_splice_init(&health_list, &tlist); + + spin_unlock_irq(&health_lock); + + list_for_each_entry_safe(health, n, &tlist, list) { + priv = container_of(health, struct mlx5_priv, health); + dev = container_of(priv, struct mlx5_core_dev, priv); + mlx5_core_warn(dev, "handling bad device here\n"); + spin_lock_irq(&health_lock); + if (reg_handler) + reg_handler(dev->pdev, health->health, + sizeof(health->health)); + + list_del_init(&health->list); + spin_unlock_irq(&health_lock); + } +} + +static const char *hsynd_str(u8 synd) +{ + switch (synd) { + case MLX5_HEALTH_SYNDR_FW_ERR: + return "firmware internal error"; + case MLX5_HEALTH_SYNDR_IRISC_ERR: + return "irisc not responding"; + case MLX5_HEALTH_SYNDR_CRC_ERR: + return "firmware CRC error"; + case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: + return "ICM fetch PCI error"; + case MLX5_HEALTH_SYNDR_HW_FTL_ERR: + return "HW fatal error\n"; + case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: + return "async EQ buffer overrun"; + case MLX5_HEALTH_SYNDR_EQ_ERR: + return "EQ error"; + case MLX5_HEALTH_SYNDR_FFSER_ERR: + return "FFSER error"; + default: + return "unrecognized error"; + } +} + +static u16 read_be16(__be16 __iomem *p) +{ + return swab16(readl((__force u16 __iomem *) p)); +} + +static u32 read_be32(__be32 __iomem *p) +{ + return swab32(readl((__force u32 __iomem *) p)); +} + +static void print_health_info(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + struct health_buffer __iomem *h = health->health; + int i; + + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) + pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i)); + + pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr)); + pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra)); + pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver)); + pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id)); + pr_info("irisc_index %d\n", readb(&h->irisc_index)); + pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd))); + pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync)); +} + +static void 
poll_health(unsigned long data) +{ + struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; + struct mlx5_core_health *health = &dev->priv.health; + unsigned long next; + u32 count; + + count = ioread32be(health->health_counter); + if (count == health->prev) + ++health->miss_counter; + else + health->miss_counter = 0; + + health->prev = count; + if (health->miss_counter == MAX_MISSES) { + mlx5_core_err(dev, "device's health compromised\n"); + print_health_info(dev); + spin_lock_irq(&health_lock); + list_add_tail(&health->list, &health_list); + spin_unlock_irq(&health_lock); + + queue_work(mlx5_core_wq, &health_work); + } else { + get_random_bytes(&next, sizeof(next)); + next %= HZ; + next += jiffies + MLX5_HEALTH_POLL_INTERVAL; + mod_timer(&health->timer, next); + } +} + +void mlx5_start_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + INIT_LIST_HEAD(&health->list); + init_timer(&health->timer); + health->health = &dev->iseg->health; + health->health_counter = &dev->iseg->health_counter; + + health->timer.data = (unsigned long)dev; + health->timer.function = poll_health; + health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); + add_timer(&health->timer); +} + +void mlx5_stop_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + del_timer_sync(&health->timer); + + spin_lock_irq(&health_lock); + if (!list_empty(&health->list)) + list_del_init(&health->list); + spin_unlock_irq(&health_lock); +} + +void mlx5_health_cleanup(void) +{ +} + +void __init mlx5_health_init(void) +{ + INIT_WORK(&health_work, health_care); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c new file mode 100644 index 000000000000..18d6fd5dd90b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
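
[Editor's note] poll_health() above is a software watchdog: the device increments a counter while its firmware is alive, so MAX_MISSES consecutive identical reads mean it is stuck, and each reschedule adds up to a second of random jitter so many adapters do not poll in lockstep. A hedged sketch of just the miss-counting logic, with a canned reads[] array standing in for ioread32be(health_counter):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_MISSES 3

int main(void)
{
    uint32_t reads[] = { 1, 2, 3, 3, 3, 3 }; /* counter stalls at 3 */
    uint32_t prev = 0;
    int miss = 0;
    unsigned int i;

    for (i = 0; i < sizeof(reads) / sizeof(reads[0]); i++) {
        miss = (reads[i] == prev) ? miss + 1 : 0;
        prev = reads[i];
        if (miss == MAX_MISSES) {
            printf("read %u: health compromised, queue recovery work\n", i);
            return 1;
        }
        /* re-arm with jitter, like next = jiffies + interval + rand() % HZ */
        printf("read %u: re-arm poll, jitter %d ticks\n", i, rand() % 100);
    }
    return 0;
}
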
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, + u16 opmod, int port) +{ + struct mlx5_mad_ifc_mbox_in *in = NULL; + struct mlx5_mad_ifc_mbox_out *out = NULL; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC); + in->hdr.opmod = cpu_to_be16(opmod); + in->port = port; + + memcpy(in->data, inb, sizeof(in->data)); + + err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out)); + if (err) + goto out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out; + } + + memcpy(outb, out->data, sizeof(out->data)); + +out: + kfree(out); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c new file mode 100644 index 000000000000..12242de2b0e3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -0,0 +1,475 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <asm-generic/kmap_types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/io-mapping.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cq.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/srq.h> +#include <linux/debugfs.h> +#include "mlx5_core.h" + +#define DRIVER_NAME "mlx5_core" +#define DRIVER_VERSION "1.0" +#define DRIVER_RELDATE "June 2013" + +MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +int mlx5_core_debug_mask; +module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); +MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. 
Default=0"); + +struct workqueue_struct *mlx5_core_wq; + +static int set_dma_caps(struct pci_dev *pdev) +{ + int err; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); + return err; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, + "Warning: couldn't set 64-bit consistent PCI DMA mask.\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "Can't set consistent PCI DMA mask, aborting.\n"); + return err; + } + } + + dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); + return err; +} + +static int request_bar(struct pci_dev *pdev) +{ + int err = 0; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing registers BAR, aborting.\n"); + return -ENODEV; + } + + err = pci_request_regions(pdev, DRIVER_NAME); + if (err) + dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); + + return err; +} + +static void release_bar(struct pci_dev *pdev) +{ + pci_release_regions(pdev); +} + +static int mlx5_enable_msix(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int num_eqs = 1 << dev->caps.log_max_eq; + int nvec; + int err; + int i; + + nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = min_t(int, nvec, num_eqs); + if (nvec <= MLX5_EQ_VEC_COMP_BASE) + return -ENOMEM; + + table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); + if (!table->msix_arr) + return -ENOMEM; + + for (i = 0; i < nvec; i++) + table->msix_arr[i].entry = i; + +retry: + table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; + err = pci_enable_msix(dev->pdev, table->msix_arr, nvec); + if (err <= 0) { + return err; + } else if (err > 2) { + nvec = err; + goto retry; + } + + mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec); + + return 0; +} + +static void mlx5_disable_msix(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + + pci_disable_msix(dev->pdev); + kfree(table->msix_arr); +} + +struct mlx5_reg_host_endianess { + u8 he; + u8 rsvd[15]; +}; + +static int handle_hca_cap(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL; + struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; + struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; + struct mlx5_cmd_set_hca_cap_mbox_out set_out; + struct mlx5_profile *prof = dev->profile; + u64 flags; + int csum = 1; + int err; + + memset(&query_ctx, 0, sizeof(query_ctx)); + query_out = kzalloc(sizeof(*query_out), GFP_KERNEL); + if (!query_out) + return -ENOMEM; + + set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL); + if (!set_ctx) { + err = -ENOMEM; + goto query_ex; + } + + query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); + query_ctx.hdr.opmod = cpu_to_be16(0x1); + err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx), + query_out, sizeof(*query_out)); + if (err) + goto query_ex; + + err = mlx5_cmd_status_to_err(&query_out->hdr); + if (err) { + mlx5_core_warn(dev, "query hca cap failed, %d\n", err); + goto query_ex; + } + + memcpy(&set_ctx->hca_cap, &query_out->hca_cap, + sizeof(set_ctx->hca_cap)); + + if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) { + csum = !!prof->cmdif_csum; + flags = be64_to_cpu(set_ctx->hca_cap.flags); + if (csum) + flags |= 
MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + else + flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + + set_ctx->hca_cap.flags = cpu_to_be64(flags); + } + + if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) + set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; + + memset(&set_out, 0, sizeof(set_out)); + set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); + set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); + err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx), + &set_out, sizeof(set_out)); + if (err) { + mlx5_core_warn(dev, "set hca cap failed, %d\n", err); + goto query_ex; + } + + err = mlx5_cmd_status_to_err(&set_out.hdr); + if (err) + goto query_ex; + + if (!csum) + dev->cmd.checksum_disabled = 1; + +query_ex: + kfree(query_out); + kfree(set_ctx); + + return err; +} + +static int set_hca_ctrl(struct mlx5_core_dev *dev) +{ + struct mlx5_reg_host_endianess he_in; + struct mlx5_reg_host_endianess he_out; + int err; + + memset(&he_in, 0, sizeof(he_in)); + he_in.he = MLX5_SET_HOST_ENDIANNESS; + err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), + &he_out, sizeof(he_out), + MLX5_REG_HOST_ENDIANNESS, 0, 1); + return err; +} + +int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) +{ + struct mlx5_priv *priv = &dev->priv; + int err; + + dev->pdev = pdev; + pci_set_drvdata(dev->pdev, dev); + strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); + priv->name[MLX5_MAX_NAME_LEN - 1] = 0; + + mutex_init(&priv->pgdir_mutex); + INIT_LIST_HEAD(&priv->pgdir_list); + spin_lock_init(&priv->mkey_lock); + + priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); + if (!priv->dbg_root) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); + goto err_dbg; + } + + err = request_bar(pdev); + if (err) { + dev_err(&pdev->dev, "error requesting BARs, aborting.\n"); + goto err_disable; + } + + pci_set_master(pdev); + + err = set_dma_caps(pdev); + if (err) { + dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n"); + goto err_clr_master; + } + + dev->iseg_base = pci_resource_start(dev->pdev, 0); + dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); + if (!dev->iseg) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n"); + goto err_clr_master; + } + dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev), + fw_rev_min(dev), fw_rev_sub(dev)); + + err = mlx5_cmd_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); + goto err_unmap; + } + + mlx5_pagealloc_init(dev); + err = set_hca_ctrl(dev); + if (err) { + dev_err(&pdev->dev, "set_hca_ctrl failed\n"); + goto err_pagealloc_cleanup; + } + + err = handle_hca_cap(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap failed\n"); + goto err_pagealloc_cleanup; + } + + err = mlx5_satisfy_startup_pages(dev); + if (err) { + dev_err(&pdev->dev, "failed to allocate startup pages\n"); + goto err_pagealloc_cleanup; + } + + err = mlx5_pagealloc_start(dev); + if (err) { + dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); + goto err_reclaim_pages; + } + + err = mlx5_cmd_init_hca(dev); + if (err) { + dev_err(&pdev->dev, "init hca failed\n"); + goto err_pagealloc_stop; + } + + mlx5_start_health_poll(dev); + + err = mlx5_cmd_query_hca_cap(dev, &dev->caps); + if (err) { + dev_err(&pdev->dev, "query hca failed\n"); + goto err_stop_poll; + } + + err = mlx5_cmd_query_adapter(dev); + if (err) { + dev_err(&pdev->dev, "query adapter failed\n"); 
+ goto err_stop_poll; + } + + err = mlx5_enable_msix(dev); + if (err) { + dev_err(&pdev->dev, "enable msix failed\n"); + goto err_stop_poll; + } + + err = mlx5_eq_init(dev); + if (err) { + dev_err(&pdev->dev, "failed to initialize eq\n"); + goto disable_msix; + } + + err = mlx5_alloc_uuars(dev, &priv->uuari); + if (err) { + dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); + goto err_eq_cleanup; + } + + err = mlx5_start_eqs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); + goto err_free_uar; + } + + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); + + mlx5_init_cq_table(dev); + mlx5_init_qp_table(dev); + mlx5_init_srq_table(dev); + + return 0; + +err_free_uar: + mlx5_free_uuars(dev, &priv->uuari); + +err_eq_cleanup: + mlx5_eq_cleanup(dev); + +disable_msix: + mlx5_disable_msix(dev); + +err_stop_poll: + mlx5_stop_health_poll(dev); + mlx5_cmd_teardown_hca(dev); + +err_pagealloc_stop: + mlx5_pagealloc_stop(dev); + +err_reclaim_pages: + mlx5_reclaim_startup_pages(dev); + +err_pagealloc_cleanup: + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + +err_unmap: + iounmap(dev->iseg); + +err_clr_master: + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + +err_disable: + pci_disable_device(dev->pdev); + +err_dbg: + debugfs_remove(priv->dbg_root); + return err; +} +EXPORT_SYMBOL(mlx5_dev_init); + +void mlx5_dev_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + + mlx5_cleanup_srq_table(dev); + mlx5_cleanup_qp_table(dev); + mlx5_cleanup_cq_table(dev); + mlx5_stop_eqs(dev); + mlx5_free_uuars(dev, &priv->uuari); + mlx5_eq_cleanup(dev); + mlx5_disable_msix(dev); + mlx5_stop_health_poll(dev); + mlx5_cmd_teardown_hca(dev); + mlx5_pagealloc_stop(dev); + mlx5_reclaim_startup_pages(dev); + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + iounmap(dev->iseg); + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + pci_disable_device(dev->pdev); + debugfs_remove(priv->dbg_root); +} +EXPORT_SYMBOL(mlx5_dev_cleanup); + +static int __init init(void) +{ + int err; + + mlx5_register_debugfs(); + mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq"); + if (!mlx5_core_wq) { + err = -ENOMEM; + goto err_debug; + } + mlx5_health_init(); + + return 0; + + mlx5_health_cleanup(); +err_debug: + mlx5_unregister_debugfs(); + return err; +} + +static void __exit cleanup(void) +{ + mlx5_health_cleanup(); + destroy_workqueue(mlx5_core_wq); + mlx5_unregister_debugfs(); +} + +module_init(init); +module_exit(cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c new file mode 100644 index 000000000000..44837640bd7c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
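
[Editor's note] One sizing decision in mlx5_enable_msix() above deserves a worked example: the driver asks for one completion vector per online CPU per port plus the fixed control vectors, clamps that to the number of EQs the device exposes, and retries with whatever count the legacy pci_enable_msix() reports as available (that era of the API returned a positive vector count when the full request could not be granted). A hedged userspace sketch with made-up numbers:

#include <stdio.h>

/* Stand-in for the old pci_enable_msix(): 0 on success, a positive count
 * when only that many vectors are available, negative on error. */
static int enable_msix_sim(int nvec)
{
    const int available = 9;
    return nvec <= available ? 0 : available;
}

int main(void)
{
    const int ports = 2, cpus = 8, ctrl = 3; /* ctrl ~ MLX5_EQ_VEC_COMP_BASE */
    const int num_eqs = 64; /* 1 << log_max_eq */
    int nvec = ports * cpus + ctrl;
    int err;

    if (nvec > num_eqs)
        nvec = num_eqs;

    for (;;) {
        err = enable_msix_sim(nvec);
        if (err == 0)
            break;
        if (err < 0)
            return 1;
        nvec = err; /* retry with the count the bus code offered */
    }
    printf("enabled %d vectors, %d for completions\n", nvec, nvec - ctrl);
    return 0;
}
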
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <rdma/ib_verbs.h> +#include "mlx5_core.h" + +struct mlx5_attach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_attach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +struct mlx5_detach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_detach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_attach_mcg_mbox_in in; + struct mlx5_attach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_attach_mcg); + +int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_detach_mcg_mbox_in in; + struct mlx5_detach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_detach_mcg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h new file mode 100644 index 000000000000..68b74e1ae1b0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
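
[Editor's note] The attach/detach wrappers above take a 16-byte multicast GID and a 24-bit QP number and simply marshal them into the mailbox. A hedged sketch of that marshalling, mirroring mlx5_attach_mcg_mbox_in minus the command header; the GID bytes and QPN are invented, and the byte swap assumes a little-endian host using a GCC/Clang builtin:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attach_in {
    uint32_t qpn_be;
    uint32_t rsvd;
    uint8_t gid[16];
};

int main(void)
{
    struct attach_in in;
    uint32_t qpn = 0x00abcdef; /* QPNs are 24 bits wide */
    uint8_t mgid[16] = { 0xff, 0x12, 0x40, 0x1b }; /* illustrative multicast GID */

    memset(&in, 0, sizeof(in));
    in.qpn_be = __builtin_bswap32(qpn); /* cpu_to_be32 on a little-endian host */
    memcpy(in.gid, mgid, sizeof(in.gid));

    printf("attach qpn 0x%06x, gid starts %02x:%02x\n", qpn, in.gid[0], in.gid[1]);
    return 0;
}
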
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_CORE_H__ +#define __MLX5_CORE_H__ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/sched.h> + +extern int mlx5_core_debug_mask; + +#define mlx5_core_dbg(dev, format, arg...) \ +pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ + current->pid, ##arg) + +#define mlx5_core_dbg_mask(dev, mask, format, arg...) \ +do { \ + if ((mask) & mlx5_core_debug_mask) \ + pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define mlx5_core_err(dev, format, arg...) \ +pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ + current->pid, ##arg) + +#define mlx5_core_warn(dev, format, arg...) \ +pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ + current->pid, ##arg) + +enum { + MLX5_CMD_DATA, /* print command payload only */ + MLX5_CMD_TIME, /* print command execution time */ +}; + + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, + struct mlx5_caps *caps); +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); + +#endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c new file mode 100644 index 000000000000..5b44e2e46daf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
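
[Editor's note] The mlx5_core_dbg_mask() macro above is the interesting one in this header: the debug_mask module parameter is a bitmask and each call site names the bit that enables it, which is why the parameter description reads "1 = dump cmd data, 2 = dump cmd exec time, 3 = both". A small standalone sketch of the idiom; note the header's MLX5_CMD_DATA/MLX5_CMD_TIME are enum indices, so the bit values below are illustrative:

#include <stdio.h>

enum { CMD_DATA = 1 << 0, CMD_TIME = 1 << 1 }; /* illustrative bit values */

static int debug_mask = CMD_TIME; /* stands in for the module parameter */

#define dbg_mask(mask, ...)            \
    do {                               \
        if ((mask) & debug_mask)       \
            printf(__VA_ARGS__);       \
    } while (0)

int main(void)
{
    dbg_mask(CMD_DATA, "cmd payload: 0x10 0x07 ...\n"); /* suppressed */
    dbg_mask(CMD_TIME, "cmd took 12 us\n");             /* printed */
    return 0;
}
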
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_create_mkey_mbox_in *in, int inlen) +{ + struct mlx5_create_mkey_mbox_out out; + int err; + u8 key; + + memset(&out, 0, sizeof(out)); + spin_lock(&dev->priv.mkey_lock); + key = dev->priv.mkey_key++; + spin_unlock(&dev->priv.mkey_lock); + in->seg.qpn_mkey7_0 |= cpu_to_be32(key); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_dbg(dev, "cmd exec faile %d\n", err); + return err; + } + + if (out.hdr.status) { + mlx5_core_dbg(dev, "status %d\n", out.hdr.status); + return mlx5_cmd_status_to_err(&out.hdr); + } + + mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; + mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_mkey); + +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) +{ + struct mlx5_destroy_mkey_mbox_in in; + struct mlx5_destroy_mkey_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_destroy_mkey); + +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_query_mkey_mbox_out *out, int outlen) +{ + struct mlx5_destroy_mkey_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_mkey); + +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + u32 *mkey) +{ + struct mlx5_query_special_ctxs_mbox_in in; + struct mlx5_query_special_ctxs_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *mkey = be32_to_cpu(out.dump_fill_mkey); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c new file mode 100644 index 000000000000..f0bf46339b28 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
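
[Editor's note] mlx5_core_create_mkey() above does something subtle: firmware returns a 24-bit mkey index, and the driver mixes in a rolling 8-bit variant byte (mkey_key++) so a stale handle with the same index can be told apart from a freshly created one. A hedged sketch of the composition, assuming the usual index<<8 packing behind mlx5_idx_to_mkey()/mlx5_mkey_to_idx():

#include <stdint.h>
#include <stdio.h>

static uint32_t idx_to_mkey(uint32_t idx) { return idx << 8; }
static uint32_t mkey_to_idx(uint32_t mkey) { return mkey >> 8; }

int main(void)
{
    static uint8_t next_key; /* rolling variant counter, like priv.mkey_key */
    uint32_t fw_index = 0x000123; /* 24-bit index as returned by firmware */
    uint8_t key = next_key++;
    uint32_t mkey = idx_to_mkey(fw_index & 0xffffff) | key;

    printf("mkey 0x%08x = index 0x%06x + variant 0x%02x\n",
           mkey, mkey_to_idx(mkey), mkey & 0xff);
    return 0;
}
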
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <asm-generic/kmap_types.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + MLX5_PAGES_CANT_GIVE = 0, + MLX5_PAGES_GIVE = 1, + MLX5_PAGES_TAKE = 2 +}; + +struct mlx5_pages_req { + struct mlx5_core_dev *dev; + u32 func_id; + s16 npages; + struct work_struct work; +}; + +struct fw_page { + struct rb_node rb_node; + u64 addr; + struct page *page; + u16 func_id; +}; + +struct mlx5_query_pages_inbox { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_pages_outbox { + struct mlx5_outbox_hdr hdr; + u8 reserved[2]; + __be16 func_id; + __be16 init_pages; + __be16 num_pages; +}; + +struct mlx5_manage_pages_inbox { + struct mlx5_inbox_hdr hdr; + __be16 rsvd0; + __be16 func_id; + __be16 rsvd1; + __be16 num_entries; + u8 rsvd2[16]; + __be64 pas[0]; +}; + +struct mlx5_manage_pages_outbox { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[2]; + __be16 num_entries; + u8 rsvd1[20]; + __be64 pas[0]; +}; + +static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node **new = &root->rb_node; + struct rb_node *parent = NULL; + struct fw_page *nfp; + struct fw_page *tfp; + + while (*new) { + parent = *new; + tfp = rb_entry(parent, struct fw_page, rb_node); + if (tfp->addr < addr) + new = &parent->rb_left; + else if (tfp->addr > addr) + new = &parent->rb_right; + else + return -EEXIST; + } + + nfp = kmalloc(sizeof(*nfp), GFP_KERNEL); + if (!nfp) + return -ENOMEM; + + nfp->addr = addr; + nfp->page = page; + nfp->func_id = func_id; + + rb_link_node(&nfp->rb_node, parent, new); + rb_insert_color(&nfp->rb_node, root); + + return 0; +} + +static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node *tmp = root->rb_node; + struct page *result = NULL; + struct fw_page *tfp; + + while (tmp) { + tfp = rb_entry(tmp, struct fw_page, rb_node); + if (tfp->addr < addr) { + tmp = tmp->rb_left; + } else if (tfp->addr > addr) { + tmp = tmp->rb_right; + } else { + rb_erase(&tfp->rb_node, root); + result = tfp->page; + kfree(tfp); + break; + } + } + + return result; +} + +static int mlx5_cmd_query_pages(struct 
mlx5_core_dev *dev, u16 *func_id, + s16 *pages, s16 *init_pages) +{ + struct mlx5_query_pages_inbox in; + struct mlx5_query_pages_outbox out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + if (pages) + *pages = be16_to_cpu(out.num_pages); + if (init_pages) + *init_pages = be16_to_cpu(out.init_pages); + *func_id = be16_to_cpu(out.func_id); + + return err; +} + +static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, + int notify_fail) +{ + struct mlx5_manage_pages_inbox *in; + struct mlx5_manage_pages_outbox out; + struct page *page; + int inlen; + u64 addr; + int err; + int i; + + inlen = sizeof(*in) + npages * sizeof(in->pas[0]); + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); + return -ENOMEM; + } + memset(&out, 0, sizeof(out)); + + for (i = 0; i < npages; i++) { + page = alloc_page(GFP_HIGHUSER); + if (!page) { + err = -ENOMEM; + mlx5_core_warn(dev, "failed to allocate page\n"); + goto out_alloc; + } + addr = dma_map_page(&dev->pdev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + mlx5_core_warn(dev, "failed dma mapping page\n"); + __free_page(page); + err = -ENOMEM; + goto out_alloc; + } + err = insert_page(dev, addr, page, func_id); + if (err) { + mlx5_core_err(dev, "failed to track allocated page\n"); + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); + err = -ENOMEM; + goto out_alloc; + } + in->pas[i] = cpu_to_be64(addr); + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); + in->func_id = cpu_to_be16(func_id); + in->num_entries = cpu_to_be16(npages); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + mlx5_core_dbg(dev, "err %d\n", err); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); + goto out_alloc; + } + dev->priv.fw_pages += npages; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); + goto out_alloc; + } + } + + mlx5_core_dbg(dev, "err %d\n", err); + + goto out_free; + +out_alloc: + if (notify_fail) { + memset(in, 0, inlen); + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); + if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out))) + mlx5_core_warn(dev, "\n"); + } + for (i--; i >= 0; i--) { + addr = be64_to_cpu(in->pas[i]); + page = remove_page(dev, addr); + if (!page) { + mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n", + addr); + continue; + } + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); + } + +out_free: + mlx5_vfree(in); + return err; +} + +static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, + int *nclaimed) +{ + struct mlx5_manage_pages_inbox in; + struct mlx5_manage_pages_outbox *out; + struct page *page; + int num_claimed; + int outlen; + u64 addr; + int err; + int i; + + memset(&in, 0, sizeof(in)); + outlen = sizeof(*out) + npages * sizeof(out->pas[0]); + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + in.hdr.opcode = 
cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); + in.func_id = cpu_to_be16(func_id); + in.num_entries = cpu_to_be16(npages); + mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) { + mlx5_core_err(dev, "failed recliaming pages\n"); + goto out_free; + } + dev->priv.fw_pages -= npages; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_free; + } + + num_claimed = be16_to_cpu(out->num_entries); + if (nclaimed) + *nclaimed = num_claimed; + + for (i = 0; i < num_claimed; i++) { + addr = be64_to_cpu(out->pas[i]); + page = remove_page(dev, addr); + if (!page) { + mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr); + } else { + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); + } + } + +out_free: + mlx5_vfree(out); + return err; +} + +static void pages_work_handler(struct work_struct *work) +{ + struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); + struct mlx5_core_dev *dev = req->dev; + int err = 0; + + if (req->npages < 0) + err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); + else if (req->npages > 0) + err = give_pages(dev, req->func_id, req->npages, 1); + + if (err) + mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? + "reclaim" : "give", err); + + kfree(req); +} + +void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, + s16 npages) +{ + struct mlx5_pages_req *req; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) { + mlx5_core_warn(dev, "failed to allocate pages request\n"); + return; + } + + req->dev = dev; + req->func_id = func_id; + req->npages = npages; + INIT_WORK(&req->work, pages_work_handler); + queue_work(dev->priv.pg_wq, &req->work); +} + +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev) +{ + s16 uninitialized_var(init_pages); + u16 uninitialized_var(func_id); + int err; + + err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages); + if (err) + return err; + + mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id); + + return give_pages(dev, func_id, init_pages, 0); +} + +static int optimal_reclaimed_pages(void) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_layout *lay; + int ret; + + ret = (sizeof(lay->in) + sizeof(block->data) - + sizeof(struct mlx5_manage_pages_outbox)) / 8; + + return ret; +} + +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +{ + unsigned long end = jiffies + msecs_to_jiffies(5000); + struct fw_page *fwp; + struct rb_node *p; + int err; + + do { + p = rb_first(&dev->priv.page_root); + if (p) { + fwp = rb_entry(p, struct fw_page, rb_node); + err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); + if (err) { + mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); + return err; + } + } + if (time_after(jiffies, end)) { + mlx5_core_warn(dev, "FW did not return all pages. 
giving up...\n"); + break; + } + } while (p); + + return 0; +} + +void mlx5_pagealloc_init(struct mlx5_core_dev *dev) +{ + dev->priv.page_root = RB_ROOT; +} + +void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) +{ + /* nothing */ +} + +int mlx5_pagealloc_start(struct mlx5_core_dev *dev) +{ + dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); + if (!dev->priv.pg_wq) + return -ENOMEM; + + return 0; +} + +void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) +{ + destroy_workqueue(dev->priv.pg_wq); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c new file mode 100644 index 000000000000..790da5c4ca4f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
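
[Editor's note] The page allocator above hangs every page handed to firmware off a tree keyed by its DMA address (insert_page()/remove_page()), because that address is the only identifier the device quotes back when pages are reclaimed. A hedged sketch of the same bookkeeping, with a plain unbalanced binary search tree standing in for the kernel rbtree: same insert/lookup shape, no rebalancing:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fw_page {
    uint64_t addr; /* DMA address, the lookup key */
    uint16_t func_id;
    struct fw_page *l, *r;
};

static int insert(struct fw_page **slot, uint64_t addr, uint16_t func_id)
{
    while (*slot) {
        if ((*slot)->addr == addr)
            return -1; /* duplicate, like -EEXIST */
        slot = addr < (*slot)->addr ? &(*slot)->l : &(*slot)->r;
    }
    *slot = calloc(1, sizeof(**slot));
    if (!*slot)
        return -1; /* like -ENOMEM */
    (*slot)->addr = addr;
    (*slot)->func_id = func_id;
    return 0;
}

static struct fw_page *find(struct fw_page *n, uint64_t addr)
{
    while (n && n->addr != addr)
        n = addr < n->addr ? n->l : n->r;
    return n;
}

int main(void)
{
    struct fw_page *root = NULL;

    insert(&root, 0x1000, 0);
    insert(&root, 0x3000, 0);
    printf("0x3000 tracked: %s\n", find(root, 0x3000) ? "yes" : "no");
    printf("0x2000 tracked: %s\n", find(root, 0x2000) ? "yes" : "no");
    return 0;
}
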
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +struct mlx5_alloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) +{ + struct mlx5_alloc_pd_mbox_in in; + struct mlx5_alloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *pdn = be32_to_cpu(out.pdn) & 0xffffff; + return err; +} +EXPORT_SYMBOL(mlx5_core_alloc_pd); + +int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) +{ + struct mlx5_dealloc_pd_mbox_in in; + struct mlx5_dealloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dealloc_pd); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c new file mode 100644 index 000000000000..f6afe7b5a675 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
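pd.c above is the smallest instance of the driver's mailbox idiom: zero stack-allocated in/out boxes, set the opcode, execute, convert any firmware status to an errno, then mask out the 24-bit object number. A consumer would pair the two exports like this; example_use_pd() is illustrative only:

	static int example_use_pd(struct mlx5_core_dev *dev)
	{
		u32 pdn;
		int err;

		err = mlx5_core_alloc_pd(dev, &pdn);	/* pdn is a 24-bit PD number */
		if (err)
			return err;

		/* ... create MRs/QPs that reference pdn ... */

		return mlx5_core_dealloc_pd(dev, pdn);
	}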
+ */ + +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write) +{ + struct mlx5_access_reg_mbox_in *in = NULL; + struct mlx5_access_reg_mbox_out *out = NULL; + int err = -ENOMEM; + + in = mlx5_vzalloc(sizeof(*in) + size_in); + if (!in) + return -ENOMEM; + + out = mlx5_vzalloc(sizeof(*out) + size_out); + if (!out) + goto ex1; + + memcpy(in->data, data_in, size_in); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG); + in->hdr.opmod = cpu_to_be16(!write); + in->arg = cpu_to_be32(arg); + in->register_id = cpu_to_be16(reg_num); + err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, + sizeof(*out) + size_out); + if (err) + goto ex2; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + if (!err) + memcpy(data_out, out->data, size_out); + +ex2: + mlx5_vfree(out); +ex1: + mlx5_vfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_access_reg); + + +struct mlx5_reg_pcap { + u8 rsvd0; + u8 port_num; + u8 rsvd1[2]; + __be32 caps_127_96; + __be32 caps_95_64; + __be32 caps_63_32; + __be32 caps_31_0; +}; + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps) +{ + struct mlx5_reg_pcap in; + struct mlx5_reg_pcap out; + int err; + + memset(&in, 0, sizeof(in)); + in.caps_127_96 = cpu_to_be32(caps); + in.port_num = port_num; + + err = mlx5_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), MLX5_REG_PCAP, 0, 1); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_caps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c new file mode 100644 index 000000000000..54faf8bfcaf4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
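mlx5_core_access_reg() above drives ACCESS_REG in both directions; since opmod is cpu_to_be16(!write), the last argument selects write (1) or read (0). mlx5_set_port_caps() is the write side; a read of the same PCAP register might look like the following sketch (example_query_port_caps() is hypothetical, reusing struct mlx5_reg_pcap from above):

	static int example_query_port_caps(struct mlx5_core_dev *dev,
					   int port_num, u32 *caps)
	{
		struct mlx5_reg_pcap in;
		struct mlx5_reg_pcap out;
		int err;

		memset(&in, 0, sizeof(in));
		in.port_num = port_num;

		err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
					   sizeof(out), MLX5_REG_PCAP,
					   0, 0 /* read */);
		if (!err)
			*caps = be32_to_cpu(out.caps_127_96);

		return err;
	}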
+ */ + + +#include <linux/gfp.h> +#include <linux/export.h> +#include <linux/mlx5/cmd.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/driver.h> + +#include "mlx5_core.h" + +void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_core_qp *qp; + + spin_lock(&table->lock); + + qp = radix_tree_lookup(&table->tree, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&table->lock); + + if (!qp) { + mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + struct mlx5_create_qp_mbox_in *in, + int inlen) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_create_qp_mbox_out out; + struct mlx5_destroy_qp_mbox_in din; + struct mlx5_destroy_qp_mbox_out dout; + int err; + + memset(&dout, 0, sizeof(dout)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_warn(dev, "ret %d", err); + return err; + } + + if (out.hdr.status) { + pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps)); + return mlx5_cmd_status_to_err(&out.hdr); + } + + qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; + mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, qp->qpn, qp); + spin_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "err %d", err); + goto err_cmd; + } + + err = mlx5_debug_qp_add(dev, qp); + if (err) + mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", + qp->qpn); + + qp->pid = current->pid; + atomic_set(&qp->refcount, 1); + atomic_inc(&dev->num_qps); + init_completion(&qp->free); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + din.qpn = cpu_to_be32(qp->qpn); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_create_qp); + +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp) +{ + struct mlx5_destroy_qp_mbox_in in; + struct mlx5_destroy_qp_mbox_out out; + struct mlx5_qp_table *table = &dev->priv.qp_table; + unsigned long flags; + int err; + + mlx5_debug_qp_remove(dev, qp); + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, qp->qpn); + spin_unlock_irqrestore(&table->lock, flags); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + atomic_dec(&dev->num_qps); + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); + +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, + enum mlx5_qp_state new_state, + struct mlx5_modify_qp_mbox_in *in, int sqd_event, + struct mlx5_core_qp *qp) +{ + static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { + [MLX5_QP_STATE_RST] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, + }, + 
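/* optab[cur_state][new_state] holds the MODIFY_QP opcode for each legal transition; a zero entry makes mlx5_core_qp_modify() below return -EINVAL. */ + 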
[MLX5_QP_STATE_INIT] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, + [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, + }, + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, + [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_RTS2SQD_QP, + }, + [MLX5_QP_STATE_SQD] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD2RTS_QP, + [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_SQD2SQD_QP, + }, + [MLX5_QP_STATE_SQER] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, + }, + [MLX5_QP_STATE_ERR] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + } + }; + + struct mlx5_modify_qp_mbox_out out; + int err = 0; + u16 op; + + if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE || + !optab[cur_state][new_state]) + return -EINVAL; + + memset(&out, 0, sizeof(out)); + op = optab[cur_state][new_state]; + in->hdr.opcode = cpu_to_be16(op); + in->qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err) + return err; + + return mlx5_cmd_status_to_err(&out.hdr); +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_modify); + +void mlx5_init_qp_table(struct mlx5_core_dev *dev) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + mlx5_qp_debugfs_init(dev); +} + +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) +{ + mlx5_qp_debugfs_cleanup(dev); +} + +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + struct mlx5_query_qp_mbox_out *out, int outlen) +{ + struct mlx5_query_qp_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_query); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) +{ + struct mlx5_alloc_xrcd_mbox_in in; + struct mlx5_alloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + else + *xrcdn = be32_to_cpu(out.xrcdn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); + +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) +{ + struct mlx5_dealloc_xrcd_mbox_in in; + struct mlx5_dealloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); + in.xrcdn = cpu_to_be32(xrcdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c new file mode 100644 index 000000000000..38bce93f8314 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <linux/mlx5/srq.h> +#include <rdma/ib_verbs.h> +#include "mlx5_core.h" + +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + if (!srq) { + mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn); + return; + } + + srq->event(srq, event_type); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); +} + +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + return srq; +} +EXPORT_SYMBOL(mlx5_core_get_srq); + +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen) +{ + struct mlx5_create_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_destroy_srq_mbox_in din; + struct mlx5_destroy_srq_mbox_out dout; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + + atomic_set(&srq->refcount, 1); + init_completion(&srq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, srq->srqn, srq); + spin_unlock_irq(&table->lock); + if (err) { + 
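/* Tree insert failed: fall through to err_cmd, which destroys the SRQ the firmware just created. */ + 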
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); + goto err_cmd; + } + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.srqn = cpu_to_be32(srq->srqn); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_srq); + +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) +{ + struct mlx5_destroy_srq_mbox_in in; + struct mlx5_destroy_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, srq->srqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn); + return -EINVAL; + } + if (tmp != srq) { + mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_destroy_srq); + +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + struct mlx5_query_srq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_srq); + +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq) +{ + struct mlx5_arm_srq_mbox_in in; + struct mlx5_arm_srq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); + in.hdr.opmod = cpu_to_be16(!!is_srq); + in.srqn = cpu_to_be32(srq->srqn); + in.lwm = cpu_to_be16(lwm); + + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_arm_srq); + +void mlx5_init_srq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev) +{ + /* nothing */ +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c new file mode 100644 index 000000000000..71d4a3937200 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
 You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + NUM_DRIVER_UARS = 4, + NUM_LOW_LAT_UUARS = 4, +}; + + +struct mlx5_alloc_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) +{ + struct mlx5_alloc_uar_mbox_in in; + struct mlx5_alloc_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto ex; + } + + *uarn = be32_to_cpu(out.uarn) & 0xffffff; + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_alloc_uar); + +int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) +{ + struct mlx5_free_uar_mbox_in in; + struct mlx5_free_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); + in.uarn = cpu_to_be32(uarn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_free_uar); + +static int need_uuar_lock(int uuarn) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + + if (uuarn == 0 || uuarn >= tot_uuars - NUM_LOW_LAT_UUARS) + return 0; + + return 1; +} + +int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + struct mlx5_bf *bf; + phys_addr_t addr; + int err; + int i; + + uuari->num_uars = NUM_DRIVER_UARS; + uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; + + mutex_init(&uuari->lock); + uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); + if (!uuari->uars) + return -ENOMEM; + + uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); + if (!uuari->bfs) { + 
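/* bfs allocation failed: unwind through out_uars, which frees the uars array allocated above. */ + 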
err = -ENOMEM; + goto out_uars; + } + + uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), + GFP_KERNEL); + if (!uuari->bitmap) { + err = -ENOMEM; + goto out_bfs; + } + + uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); + if (!uuari->count) { + err = -ENOMEM; + goto out_bitmap; + } + + for (i = 0; i < uuari->num_uars; i++) { + err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); + if (err) + goto out_count; + + addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); + uuari->uars[i].map = ioremap(addr, PAGE_SIZE); + if (!uuari->uars[i].map) { + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + err = -ENOMEM; + goto out_count; + } + mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", + uuari->uars[i].index, uuari->uars[i].map); + } + + for (i = 0; i < tot_uuars; i++) { + bf = &uuari->bfs[i]; + + bf->buf_size = dev->caps.bf_reg_size / 2; + bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; + bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; + bf->reg = NULL; /* Add WC support */ + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size + + MLX5_BF_OFFSET; + bf->need_lock = need_uuar_lock(i); + spin_lock_init(&bf->lock); + spin_lock_init(&bf->lock32); + bf->uuarn = i; + } + + return 0; + +out_count: + for (i--; i >= 0; i--) { + iounmap(uuari->uars[i].map); + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + } + kfree(uuari->count); + +out_bitmap: + kfree(uuari->bitmap); + +out_bfs: + kfree(uuari->bfs); + +out_uars: + kfree(uuari->uars); + return err; +} + +int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +{ + int i = uuari->num_uars; + + for (i--; i >= 0; i--) { + iounmap(uuari->uars[i].map); + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + } + + kfree(uuari->count); + kfree(uuari->bitmap); + kfree(uuari->bfs); + kfree(uuari->uars); + + return 0; +} diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index fe42fc00d8d3..d16b11ed2e52 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -22,7 +22,6 @@ if NET_VENDOR_MICREL config ARM_KS8695_ETHER tristate "KS8695 Ethernet support" depends on ARM && ARCH_KS8695 - select NET_CORE select MII ---help--- If you wish to compile a kernel for the KS8695 and want to @@ -39,7 +38,6 @@ config KS8842 config KS8851 tristate "Micrel KS8851 SPI" depends on SPI - select NET_CORE select MII select CRC32 select EEPROM_93CX6 @@ -49,7 +47,6 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM - select NET_CORE select MII ---help--- This platform driver is for Micrel KS8851 Address/data bus @@ -58,7 +55,6 @@ config KS8851_MLL config KSZ884X_PCI tristate "Micrel KSZ8841/2 PCI" depends on PCI - select NET_CORE select MII select CRC32 ---help--- diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index b6c60fdef4ff..106eb972f2ac 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1600,7 +1600,6 @@ ks8695_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct ks8695_priv *ksp = netdev_priv(ndev); - platform_set_drvdata(pdev, NULL); netif_napi_del(&ksp->napi); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index fbcb9e74d7fc..e393d998be89 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1250,7 +1250,6 @@ static int 
ks8842_remove(struct platform_device *pdev) iounmap(adapter->hw_addr); free_netdev(netdev); release_mem_region(iomem->start, resource_size(iomem)); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index ddaf138ce0d4..ac20098b542a 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -35,6 +35,9 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/ks8851_mll.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_net.h> #define DRV_NAME "ks8851_mll" @@ -1524,6 +1527,13 @@ static int ks_hw_init(struct ks_net *ks) return true; } +#if defined(CONFIG_OF) +static const struct of_device_id ks8851_ml_dt_ids[] = { + { .compatible = "micrel,ks8851-mll" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids); +#endif static int ks8851_probe(struct platform_device *pdev) { @@ -1532,7 +1542,7 @@ static int ks8851_probe(struct platform_device *pdev) struct net_device *netdev; struct ks_net *ks; u16 id, data; - struct ks8851_mll_platform_data *pdata; + const char *mac; io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -1619,13 +1629,21 @@ static int ks8851_probe(struct platform_device *pdev) ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); /* overwriting the default MAC address */ - pdata = pdev->dev.platform_data; - if (!pdata) { - netdev_err(netdev, "No platform data\n"); - err = -ENODEV; - goto err_pdata; + if (pdev->dev.of_node) { + mac = of_get_mac_address(pdev->dev.of_node); + if (mac) + memcpy(ks->mac_addr, mac, ETH_ALEN); + } else { + struct ks8851_mll_platform_data *pdata; + + pdata = pdev->dev.platform_data; + if (!pdata) { + netdev_err(netdev, "No platform data\n"); + err = -ENODEV; + goto err_pdata; + } + memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN); } - memcpy(ks->mac_addr, pdata->mac_addr, 6); if (!is_valid_ether_addr(ks->mac_addr)) { /* Use random MAC address if none passed */ eth_random_addr(ks->mac_addr); @@ -1671,7 +1689,6 @@ static int ks8851_remove(struct platform_device *pdev) iounmap(ks->hw_addr); free_netdev(netdev); release_mem_region(iomem->start, resource_size(iomem)); - platform_set_drvdata(pdev, NULL); return 0; } @@ -1680,6 +1697,7 @@ static struct platform_driver ks8851_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ks8851_ml_dt_ids), }, .probe = ks8851_probe, .remove = ks8851_remove, diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 7be9788ed0f6..967bae8b85c5 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3299,7 +3299,7 @@ static int myri10ge_resume(struct pci_dev *pdev) if (mgp == NULL) return -EINVAL; netdev = mgp->dev; - pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */ + pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */ msleep(5); /* give card time to respond */ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); if (vendor == 0xffff) { diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index cb9e63831500..dc2c6f561e9a 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -422,7 +422,6 @@ exit_free_pfifo: exit_free_xc: free_xc(priv->xc); exit_free_netdev: - platform_set_drvdata(pdev, NULL); free_netdev(ndev); 
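The platform_set_drvdata(pdev, NULL) deletions running through these hunks (ks8695net, ks8842, and netx-eth below) all lean on the same driver-core change: since the 3.10 cycle the core clears drvdata itself when a device is unbound, so remove() paths no longer need the explicit NULL store. The shape they converge on, as a hedged sketch (example_remove() is hypothetical):

	static int example_remove(struct platform_device *pdev)
	{
		struct net_device *ndev = platform_get_drvdata(pdev);

		unregister_netdev(ndev);
		free_netdev(ndev);
		/* no platform_set_drvdata(pdev, NULL); the core clears it */
		return 0;
	}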
exit: return ret; @@ -430,11 +429,9 @@ exit: static int netx_eth_drv_remove(struct platform_device *pdev) { - struct net_device *ndev = dev_get_drvdata(&pdev->dev); + struct net_device *ndev = platform_get_drvdata(pdev); struct netx_eth_priv *priv = netdev_priv(ndev); - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); xc_stop(priv->xc); free_xc(priv->xc); diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig index 334c17183095..01182b559473 100644 --- a/drivers/net/ethernet/nuvoton/Kconfig +++ b/drivers/net/ethernet/nuvoton/Kconfig @@ -22,7 +22,6 @@ config W90P910_ETH tristate "Nuvoton w90p910 Ethernet support" depends on ARM && ARCH_W90X900 select PHYLIB - select NET_CORE select MII ---help--- Say Y here if you want to use built-in Ethernet ports diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 3df8287b7452..e88bdb1aa669 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -1051,7 +1051,6 @@ failed_put_clk: clk_put(ether->clk); failed_free_rxirq: free_irq(ether->rxirq, pdev); - platform_set_drvdata(pdev, NULL); failed_free_txirq: free_irq(ether->txirq, pdev); failed_free_io: @@ -1080,7 +1079,6 @@ static int w90p910_ether_remove(struct platform_device *pdev) free_irq(ether->rxirq, dev); del_timer_sync(ðer->check_timer); - platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index b003fe53c8e2..098b96dad66f 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6340,7 +6340,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { {0,}, }; -static struct pci_driver driver = { +static struct pci_driver forcedeth_pci_driver = { .name = DRV_NAME, .id_table = pci_tbl, .probe = nv_probe, @@ -6349,16 +6349,6 @@ static struct pci_driver driver = { .driver.pm = NV_PM_OPS, }; -static int __init init_nic(void) -{ - return pci_register_driver(&driver); -} - -static void __exit exit_nic(void) -{ - pci_unregister_driver(&driver); -} - module_param(max_interrupt_work, int, 0); MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); module_param(optimization_mode, int, 0); @@ -6379,11 +6369,8 @@ module_param(debug_tx_timeout, bool, 0); MODULE_PARM_DESC(debug_tx_timeout, "Dump tx related registers and ring when tx_timeout happens"); +module_pci_driver(forcedeth_pci_driver); MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); MODULE_LICENSE("GPL"); - MODULE_DEVICE_TABLE(pci, pci_tbl); - -module_init(init_nic); -module_exit(exit_nic); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 55a5548d6add..a061b93efe66 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1483,7 +1483,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) return 0; err_out_unregister_netdev: - platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); err_out_dma_unmap: if (!use_iram_for_net(&pldat->pdev->dev) || @@ -1511,7 +1510,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) struct netdata_local *pldat = netdev_priv(ndev); unregister_netdev(ndev); - platform_set_drvdata(pdev, NULL); if (!use_iram_for_net(&pldat->pdev->dev) || pldat->dma_buff_size > lpc32xx_return_iram_size()) diff --git 
a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig index 3de52ffd2872..a7aa28054cc1 100644 --- a/drivers/net/ethernet/octeon/Kconfig +++ b/drivers/net/ethernet/octeon/Kconfig @@ -4,7 +4,7 @@ config OCTEON_MGMT_ETHERNET tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC select PHYLIB select MDIO_OCTEON default y diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 91a8a5d28037..622aa75904c4 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1448,7 +1448,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) SET_NETDEV_DEV(netdev, &pdev->dev); - dev_set_drvdata(&pdev->dev, netdev); + platform_set_drvdata(pdev, netdev); p = netdev_priv(netdev); netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, OCTEON_MGMT_NAPI_WEIGHT); @@ -1570,7 +1570,7 @@ err: static int octeon_mgmt_remove(struct platform_device *pdev) { - struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct net_device *netdev = platform_get_drvdata(pdev); unregister_netdev(netdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 34d05bf72b2e..cb22341a14a8 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -5,7 +5,6 @@ config PCH_GBE tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" depends on PCI - select NET_CORE select MII select PTP_1588_CLOCK_PCH ---help--- diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 7fb7e178c74e..7779036690cc 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -633,6 +633,8 @@ struct pch_gbe_adapter { struct pci_dev *ptp_pdev; }; +#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw) + extern const char pch_driver_version[]; /* pch_gbe_main.c */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 5ae03e815ee9..ff3ad70935a6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -19,6 +19,7 @@ */ #include "pch_gbe.h" #include "pch_gbe_phy.h" +#include "pch_gbe_api.h" /* bus type values */ #define pch_gbe_bus_type_unknown 0 @@ -70,7 +71,9 @@ static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) ret_val = pch_gbe_phy_get_id(hw); if (ret_val) { - pr_err("pch_gbe_phy_get_id error\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); return ret_val; } pch_gbe_phy_init_setting(hw); @@ -112,10 +115,12 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) * 0: Successfully * ENOSYS: Function is not registered */ -inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) +s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) { if (!hw->reg) { - pr_err("ERROR: Registers not mapped\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: Registers not mapped\n"); return -ENOSYS; } pch_gbe_plat_init_function_pointers(hw); @@ -126,12 +131,15 @@ inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) * pch_gbe_hal_get_bus_info - Obtain bus information for adapter * @hw: Pointer to the HW 
structure */ -inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) +void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) { - if (!hw->func->get_bus_info) - pr_err("ERROR: configuration\n"); - else - hw->func->get_bus_info(hw); + if (!hw->func->get_bus_info) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->get_bus_info(hw); } /** @@ -141,10 +149,12 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) * 0: Successfully * ENOSYS: Function is not registered */ -inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) +s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) { if (!hw->func->init_hw) { - pr_err("ERROR: configuration\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); return -ENOSYS; } return hw->func->init_hw(hw); @@ -159,7 +169,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) * 0: Successfully * Negative value: Failed */ -inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, +s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data) { if (!hw->func->read_phy_reg) @@ -176,7 +186,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, * 0: Successfully * Negative value: Failed */ -inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, +s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data) { if (!hw->func->write_phy_reg) @@ -188,24 +198,30 @@ inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, * pch_gbe_hal_phy_hw_reset - Hard PHY reset * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) +void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) { - if (!hw->func->reset_phy) - pr_err("ERROR: configuration\n"); - else - hw->func->reset_phy(hw); + if (!hw->func->reset_phy) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->reset_phy(hw); } /** * pch_gbe_hal_phy_sw_reset - Soft PHY reset * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) +void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) { - if (!hw->func->sw_reset_phy) - pr_err("ERROR: configuration\n"); - else - hw->func->sw_reset_phy(hw); + if (!hw->func->sw_reset_phy) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->sw_reset_phy(hw); } /** @@ -215,10 +231,12 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) * 0: Successfully * ENOSYS: Function is not registered */ -inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) +s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) { if (!hw->func->read_mac_addr) { - pr_err("ERROR: configuration\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); return -ENOSYS; } return hw->func->read_mac_addr(hw); @@ -228,7 +246,7 @@ inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) * pch_gbe_hal_power_up_phy - Power up PHY * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) +void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) { if (hw->func->power_up_phy) hw->func->power_up_phy(hw); @@ -238,7 +256,7 @@ inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) * 
pch_gbe_hal_power_down_phy - Power down PHY * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) +void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) { if (hw->func->power_down_phy) hw->func->power_down_phy(hw); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 24b787be6062..1129db0cdf82 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -122,7 +122,7 @@ static int pch_gbe_set_settings(struct net_device *netdev, } ret = mii_ethtool_sset(&adapter->mii, ecmd); if (ret) { - pr_err("Error: mii_ethtool_sset\n"); + netdev_err(netdev, "Error: mii_ethtool_sset\n"); return ret; } hw->mac.link_speed = speed; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 0c1c65a9ce5e..ab1039a95bf9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -287,7 +287,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } -inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) +static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) { iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); } @@ -300,6 +300,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) */ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 adr1a, adr1b; adr1a = ioread32(&hw->reg->mac_adr[0].high); @@ -312,7 +313,7 @@ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) hw->mac.addr[4] = (u8)(adr1b & 0xFF); hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF); - pr_debug("hw->mac.addr : %pM\n", hw->mac.addr); + netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr); return 0; } @@ -324,6 +325,7 @@ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) static void pch_gbe_wait_clr_bit(void *reg, u32 bit) { u32 tmp; + /* wait busy */ tmp = 1000; while ((ioread32(reg) & bit) && --tmp) @@ -340,9 +342,10 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) */ static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 mar_low, mar_high, adrmask; - pr_debug("index : 0x%x\n", index); + netdev_dbg(adapter->netdev, "index : 0x%x\n", index); /* * HW expects these in little endian so we reverse the byte order @@ -468,10 +471,11 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, */ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); struct pch_gbe_mac_info *mac = &hw->mac; u32 rx_fctrl; - pr_debug("mac->fc = %u\n", mac->fc); + netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc); rx_fctrl = ioread32(&hw->reg->RX_FCTRL); @@ -493,14 +497,16 @@ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw) mac->tx_fc_enable = true; break; default: - pr_err("Flow control param set incorrectly\n"); + netdev_err(adapter->netdev, + "Flow control param set incorrectly\n"); return -EINVAL; } if (mac->link_duplex == DUPLEX_HALF) rx_fctrl &= ~PCH_GBE_FL_CTRL_EN; iowrite32(rx_fctrl, &hw->reg->RX_FCTRL); - pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n", - ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable); + netdev_dbg(adapter->netdev, + "RX_FCTRL reg : 
0x%08x mac->tx_fc_enable : %d\n", + ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable); return 0; } @@ -511,10 +517,11 @@ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw) */ static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 addr_mask; - pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n", - wu_evt, ioread32(&hw->reg->ADDR_MASK)); + netdev_dbg(adapter->netdev, "wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n", + wu_evt, ioread32(&hw->reg->ADDR_MASK)); if (wu_evt) { /* Set Wake-On-Lan address mask */ @@ -546,6 +553,7 @@ static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt) u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, u16 data) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 data_out = 0; unsigned int i; unsigned long flags; @@ -558,7 +566,7 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, udelay(20); } if (i == 0) { - pr_err("pch-gbe.miim won't go Ready\n"); + netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n"); spin_unlock_irqrestore(&hw->miim_lock, flags); return 0; /* No way to indicate timeout error */ } @@ -573,9 +581,9 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, } spin_unlock_irqrestore(&hw->miim_lock, flags); - pr_debug("PHY %s: reg=%d, data=0x%04X\n", - dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg, - dir == PCH_GBE_MIIM_OPER_READ ? data_out : data); + netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n", + dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg, + dir == PCH_GBE_MIIM_OPER_READ ? data_out : data); return (u16) data_out; } @@ -585,6 +593,7 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, */ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); unsigned long tmp2, tmp3; /* Set Pause packet */ @@ -606,10 +615,13 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) /* Transmit Pause Packet */ iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ); - pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", - ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2), - ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4), - ioread32(&hw->reg->PAUSE_PKT5)); + netdev_dbg(adapter->netdev, + "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + ioread32(&hw->reg->PAUSE_PKT1), + ioread32(&hw->reg->PAUSE_PKT2), + ioread32(&hw->reg->PAUSE_PKT3), + ioread32(&hw->reg->PAUSE_PKT4), + ioread32(&hw->reg->PAUSE_PKT5)); return; } @@ -624,15 +636,15 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) */ static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter) { - adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL); + adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev, + sizeof(*adapter->tx_ring), GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM; - adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL); - if (!adapter->rx_ring) { - kfree(adapter->tx_ring); + adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev, + sizeof(*adapter->rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) return -ENOMEM; - } return 0; } @@ -669,7 +681,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter) break; } adapter->hw.phy.addr = adapter->mii.phy_id; - pr_debug("phy_addr = %d\n", adapter->mii.phy_id); + netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id); 
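The pch_gbe conversion from pr_* to netdev_* logging that dominates the rest of this hunk hinges on the pch_gbe_hw_to_adapter() macro introduced in pch_gbe.h above: struct pch_gbe_hw is embedded in struct pch_gbe_adapter, so container_of() recovers the adapter, and with it the net_device to log against. A minimal sketch (example_log() is illustrative):

	static void example_log(struct pch_gbe_hw *hw)
	{
		/* hw is embedded in the adapter; container_of() walks back out */
		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);

		netdev_dbg(adapter->netdev, "hw %p belongs to %s\n",
			   hw, adapter->netdev->name);
	}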
if (addr == 32) return -EAGAIN; /* Selected the phy and isolate the rest */ @@ -758,13 +770,15 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) */ void pch_gbe_reset(struct pch_gbe_adapter *adapter) { + struct net_device *netdev = adapter->netdev; + pch_gbe_mac_reset_hw(&adapter->hw); /* reprogram multicast address register after reset */ - pch_gbe_set_multi(adapter->netdev); + pch_gbe_set_multi(netdev); /* Setup the receive address. */ pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); if (pch_gbe_hal_init_hw(&adapter->hw)) - pr_err("Hardware Error\n"); + netdev_err(netdev, "Hardware Error\n"); } /** @@ -778,7 +792,7 @@ static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter) free_irq(adapter->pdev->irq, netdev); if (adapter->have_msi) { pci_disable_msi(adapter->pdev); - pr_debug("call pci_disable_msi\n"); + netdev_dbg(netdev, "call pci_disable_msi\n"); } } @@ -795,7 +809,8 @@ static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter) ioread32(&hw->reg->INT_ST); synchronize_irq(adapter->pdev->irq); - pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN)); + netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n", + ioread32(&hw->reg->INT_EN)); } /** @@ -809,7 +824,8 @@ static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter) if (likely(atomic_dec_and_test(&adapter->irq_sem))) iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN); ioread32(&hw->reg->INT_ST); - pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN)); + netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n", + ioread32(&hw->reg->INT_EN)); } @@ -846,9 +862,9 @@ static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter) struct pch_gbe_hw *hw = &adapter->hw; u32 tdba, tdlen, dctrl; - pr_debug("dma addr = 0x%08llx size = 0x%08x\n", - (unsigned long long)adapter->tx_ring->dma, - adapter->tx_ring->size); + netdev_dbg(adapter->netdev, "dma addr = 0x%08llx size = 0x%08x\n", + (unsigned long long)adapter->tx_ring->dma, + adapter->tx_ring->size); /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = adapter->tx_ring->dma; @@ -894,9 +910,9 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) struct pch_gbe_hw *hw = &adapter->hw; u32 rdba, rdlen, rxdma; - pr_debug("dma adr = 0x%08llx size = 0x%08x\n", - (unsigned long long)adapter->rx_ring->dma, - adapter->rx_ring->size); + netdev_dbg(adapter->netdev, "dma adr = 0x%08llx size = 0x%08x\n", + (unsigned long long)adapter->rx_ring->dma, + adapter->rx_ring->size); pch_gbe_mac_force_mac_fc(hw); @@ -907,9 +923,10 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) rxdma &= ~PCH_GBE_RX_DMA_EN; iowrite32(rxdma, &hw->reg->DMA_CTRL); - pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n", - ioread32(&hw->reg->MAC_RX_EN), - ioread32(&hw->reg->DMA_CTRL)); + netdev_dbg(adapter->netdev, + "MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n", + ioread32(&hw->reg->MAC_RX_EN), + ioread32(&hw->reg->DMA_CTRL)); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -977,7 +994,8 @@ static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter, buffer_info = &tx_ring->buffer_info[i]; pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info); } - pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i); + netdev_dbg(adapter->netdev, + "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i); size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); @@ -1009,7 +1027,8 @@ 
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter, buffer_info = &rx_ring->buffer_info[i]; pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info); } - pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i); + netdev_dbg(adapter->netdev, + "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i); size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); @@ -1087,7 +1106,7 @@ static void pch_gbe_watchdog(unsigned long data) struct net_device *netdev = adapter->netdev; struct pch_gbe_hw *hw = &adapter->hw; - pr_debug("right now = %ld\n", jiffies); + netdev_dbg(netdev, "right now = %ld\n", jiffies); pch_gbe_update_stats(adapter); if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) { @@ -1095,7 +1114,7 @@ static void pch_gbe_watchdog(unsigned long data) netdev->tx_queue_len = adapter->tx_queue_len; /* mii library handles link maintenance tasks */ if (mii_ethtool_gset(&adapter->mii, &cmd)) { - pr_err("ethtool get setting Error\n"); + netdev_err(netdev, "ethtool get setting Error\n"); mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD)); @@ -1213,7 +1232,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, buffer_info->length, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { - pr_err("TX DMA map failed\n"); + netdev_err(adapter->netdev, "TX DMA map failed\n"); buffer_info->dma = 0; buffer_info->time_stamp = 0; tx_ring->next_to_use = ring_num; @@ -1333,13 +1352,13 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) /* When request status is no interruption factor */ if (unlikely(!int_st)) return IRQ_NONE; /* Not our interrupt. End processing. */ - pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st); + netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st); if (int_st & PCH_GBE_INT_RX_FRAME_ERR) adapter->stats.intr_rx_frame_err_count++; if (int_st & PCH_GBE_INT_RX_FIFO_ERR) if (!adapter->rx_stop_flag) { adapter->stats.intr_rx_fifo_err_count++; - pr_debug("Rx fifo over run\n"); + netdev_dbg(netdev, "Rx fifo over run\n"); adapter->rx_stop_flag = true; int_en = ioread32(&hw->reg->INT_EN); iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), @@ -1359,7 +1378,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) /* When Rx descriptor is empty */ if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { adapter->stats.intr_rx_dsc_empty_count++; - pr_debug("Rx descriptor is empty\n"); + netdev_dbg(netdev, "Rx descriptor is empty\n"); int_en = ioread32(&hw->reg->INT_EN); iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); if (hw->mac.tx_fc_enable) { @@ -1382,8 +1401,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) __napi_schedule(&adapter->napi); } } - pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n", - IRQ_HANDLED, ioread32(&hw->reg->INT_EN)); + netdev_dbg(netdev, "return = 0x%08x INT_EN reg = 0x%08x\n", + IRQ_HANDLED, ioread32(&hw->reg->INT_EN)); return IRQ_HANDLED; } @@ -1437,9 +1456,10 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, rx_desc->buffer_addr = (buffer_info->dma); rx_desc->gbec_status = DSC_INIT16; - pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n", - i, (unsigned long long)buffer_info->dma, - buffer_info->length); + netdev_dbg(netdev, + "i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n", + i, (unsigned long long)buffer_info->dma, + buffer_info->length); if (unlikely(++i == rx_ring->count)) i = 0; @@ -1531,12 +1551,13 @@ pch_gbe_clean_tx(struct 
pch_gbe_adapter *adapter, bool cleaned = false; int unused, thresh; - pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); + netdev_dbg(adapter->netdev, "next_to_clean : %d\n", + tx_ring->next_to_clean); i = tx_ring->next_to_clean; tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); - pr_debug("gbec_status:0x%04x dma_status:0x%04x\n", - tx_desc->gbec_status, tx_desc->dma_status); + netdev_dbg(adapter->netdev, "gbec_status:0x%04x dma_status:0x%04x\n", + tx_desc->gbec_status, tx_desc->dma_status); unused = PCH_GBE_DESC_UNUSED(tx_ring); thresh = tx_ring->count - PCH_GBE_TX_WEIGHT; @@ -1544,8 +1565,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, { /* current marked clean, tx queue filling up, do extra clean */ int j, k; if (unused < 8) { /* tx queue nearly full */ - pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n", - tx_ring->next_to_clean,tx_ring->next_to_use,unused); + netdev_dbg(adapter->netdev, + "clean_tx: transmit queue warning (%x,%x) unused=%d\n", + tx_ring->next_to_clean, tx_ring->next_to_use, + unused); } /* current marked clean, scan for more that need cleaning. */ @@ -1557,49 +1580,56 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, if (++k >= tx_ring->count) k = 0; /*increment, wrap*/ } if (j < PCH_GBE_TX_WEIGHT) { - pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", - unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status); + netdev_dbg(adapter->netdev, + "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", + unused, j, i, k, tx_ring->next_to_use, + tx_desc->gbec_status); i = k; /*found one to clean, usu gbec_status==2000.*/ } } while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { - pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); + netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n", + tx_desc->gbec_status); buffer_info = &tx_ring->buffer_info[i]; skb = buffer_info->skb; cleaned = true; if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) { adapter->stats.tx_aborted_errors++; - pr_err("Transfer Abort Error\n"); + netdev_err(adapter->netdev, "Transfer Abort Error\n"); } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER) ) { adapter->stats.tx_carrier_errors++; - pr_err("Transfer Carrier Sense Error\n"); + netdev_err(adapter->netdev, + "Transfer Carrier Sense Error\n"); } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL) ) { adapter->stats.tx_aborted_errors++; - pr_err("Transfer Collision Abort Error\n"); + netdev_err(adapter->netdev, + "Transfer Collision Abort Error\n"); } else if ((tx_desc->gbec_status & (PCH_GBE_TXD_GMAC_STAT_SNGCOL | PCH_GBE_TXD_GMAC_STAT_MLTCOL))) { adapter->stats.collisions++; adapter->stats.tx_packets++; adapter->stats.tx_bytes += skb->len; - pr_debug("Transfer Collision\n"); + netdev_dbg(adapter->netdev, "Transfer Collision\n"); } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT) ) { adapter->stats.tx_packets++; adapter->stats.tx_bytes += skb->len; } if (buffer_info->mapped) { - pr_debug("unmap buffer_info->dma : %d\n", i); + netdev_dbg(adapter->netdev, + "unmap buffer_info->dma : %d\n", i); dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->mapped = false; } if (buffer_info->skb) { - pr_debug("trim buffer_info->skb : %d\n", i); + netdev_dbg(adapter->netdev, + "trim buffer_info->skb : %d\n", i); skb_trim(buffer_info->skb, 0); } tx_desc->gbec_status = DSC_INIT16; @@ -1613,8 +1643,9 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, break; } } - pr_debug("called 
pch_gbe_unmap_and_free_tx_resource() %d count\n", - cleaned_count); + netdev_dbg(adapter->netdev, + "called pch_gbe_unmap_and_free_tx_resource() %d count\n", + cleaned_count); if (cleaned_count > 0) { /*skip this if nothing cleaned*/ /* Recover from running out of Tx resources in xmit_frame */ spin_lock(&tx_ring->tx_lock); @@ -1622,12 +1653,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, { netif_wake_queue(adapter->netdev); adapter->stats.tx_restart_count++; - pr_debug("Tx wake queue\n"); + netdev_dbg(adapter->netdev, "Tx wake queue\n"); } tx_ring->next_to_clean = i; - pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); + netdev_dbg(adapter->netdev, "next_to_clean : %d\n", + tx_ring->next_to_clean); spin_unlock(&tx_ring->tx_lock); } return cleaned; @@ -1684,22 +1716,22 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, buffer_info->length, DMA_FROM_DEVICE); buffer_info->mapped = false; - pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " - "TCP:0x%08x] BufInf = 0x%p\n", - i, dma_status, gbec_status, tcp_ip_status, - buffer_info); + netdev_dbg(netdev, + "RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x] BufInf = 0x%p\n", + i, dma_status, gbec_status, tcp_ip_status, + buffer_info); /* Error check */ if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) { adapter->stats.rx_frame_errors++; - pr_err("Receive Not Octal Error\n"); + netdev_err(netdev, "Receive Not Octal Error\n"); } else if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NBLERR)) { adapter->stats.rx_frame_errors++; - pr_err("Receive Nibble Error\n"); + netdev_err(netdev, "Receive Nibble Error\n"); } else if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_CRCERR)) { adapter->stats.rx_crc_errors++; - pr_err("Receive CRC Error\n"); + netdev_err(netdev, "Receive CRC Error\n"); } else { /* get receive length */ /* length convert[-3], length includes FCS length */ @@ -1730,8 +1762,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, napi_gro_receive(&adapter->napi, skb); (*work_done)++; - pr_debug("Receive skb->ip_summed: %d length: %d\n", - skb->ip_summed, length); + netdev_dbg(netdev, + "Receive skb->ip_summed: %d length: %d\n", + skb->ip_summed, length); } /* return some buffers to hardware, one at a time is too slow */ if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { @@ -1787,10 +1820,10 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo); tx_desc->gbec_status = DSC_INIT16; } - pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n" - "next_to_clean = 0x%08x next_to_use = 0x%08x\n", - tx_ring->desc, (unsigned long long)tx_ring->dma, - tx_ring->next_to_clean, tx_ring->next_to_use); + netdev_dbg(adapter->netdev, + "tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n", + tx_ring->desc, (unsigned long long)tx_ring->dma, + tx_ring->next_to_clean, tx_ring->next_to_use); return 0; } @@ -1829,10 +1862,10 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo); rx_desc->gbec_status = DSC_INIT16; } - pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx " - "next_to_clean = 0x%08x next_to_use = 0x%08x\n", - rx_ring->desc, (unsigned long long)rx_ring->dma, - rx_ring->next_to_clean, rx_ring->next_to_use); + netdev_dbg(adapter->netdev, + "rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n", + rx_ring->desc, (unsigned long long)rx_ring->dma, + rx_ring->next_to_clean, rx_ring->next_to_use); return 0; }
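The dominant change in this pch_gbe patch is mechanical: pr_debug()/pr_err() calls become netdev_dbg()/netdev_err(). The win is attribution, since the netdev_* helpers route through the device core and prefix each message with the driver, bus address and interface name. A minimal illustration of the contrast; demo_log() and the sample output are hypothetical, not part of the patch:

#include <linux/netdevice.h>

/* Hypothetical helper showing why the conversion is worthwhile. */
static void demo_log(struct net_device *netdev, int next_to_clean)
{
	/* pr_debug() emits only the raw format string, e.g.:
	 *     next_to_clean : 5
	 */
	pr_debug("next_to_clean : %d\n", next_to_clean);

	/* netdev_dbg() prefixes driver, bus id and interface name, e.g.:
	 *     pch_gbe 0000:02:00.1 eth0: next_to_clean : 5
	 * which keeps logs attributable on systems with several NICs.
	 */
	netdev_dbg(netdev, "next_to_clean : %d\n", next_to_clean);
}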
@@ -1886,9 +1919,9 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter) flags = IRQF_SHARED; adapter->have_msi = false; err = pci_enable_msi(adapter->pdev); - pr_debug("call pci_enable_msi\n"); + netdev_dbg(netdev, "call pci_enable_msi\n"); if (err) { - pr_debug("call pci_enable_msi - Error: %d\n", err); + netdev_dbg(netdev, "call pci_enable_msi - Error: %d\n", err); } else { flags = 0; adapter->have_msi = true; @@ -1896,9 +1929,11 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter) err = request_irq(adapter->pdev->irq, &pch_gbe_intr, flags, netdev->name, netdev); if (err) - pr_err("Unable to allocate interrupt Error: %d\n", err); - pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n", - adapter->have_msi, flags, err); + netdev_err(netdev, "Unable to allocate interrupt Error: %d\n", + err); + netdev_dbg(netdev, + "adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n", + adapter->have_msi, flags, err); return err; } @@ -1919,7 +1954,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) /* Ensure we have a valid MAC */ if (!is_valid_ether_addr(adapter->hw.mac.addr)) { - pr_err("Error: Invalid MAC address\n"); + netdev_err(netdev, "Error: Invalid MAC address\n"); goto out; } @@ -1933,12 +1968,14 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) err = pch_gbe_request_irq(adapter); if (err) { - pr_err("Error: can't bring device up - irq request failed\n"); + netdev_err(netdev, + "Error: can't bring device up - irq request failed\n"); goto out; } err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); if (err) { - pr_err("Error: can't bring device up - alloc rx buffers pool failed\n"); + netdev_err(netdev, + "Error: can't bring device up - alloc rx buffers pool failed\n"); goto freeirq; } pch_gbe_alloc_tx_buffers(adapter, tx_ring); @@ -2015,11 +2052,11 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) /* Initialize the hardware-specific values */ if (pch_gbe_hal_setup_init_funcs(hw)) { - pr_err("Hardware Initialization Failure\n"); + netdev_err(netdev, "Hardware Initialization Failure\n"); return -EIO; } if (pch_gbe_alloc_queues(adapter)) { - pr_err("Unable to allocate memory for queues\n"); + netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; } spin_lock_init(&adapter->hw.miim_lock); @@ -2030,9 +2067,10 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) pch_gbe_init_stats(adapter); - pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n", - (u32) adapter->rx_buffer_len, - hw->mac.min_frame_size, hw->mac.max_frame_size); + netdev_dbg(netdev, + "rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n", + (u32) adapter->rx_buffer_len, + hw->mac.min_frame_size, hw->mac.max_frame_size); return 0; } @@ -2061,7 +2099,7 @@ static int pch_gbe_open(struct net_device *netdev) err = pch_gbe_up(adapter); if (err) goto err_up; - pr_debug("Success End\n"); + netdev_dbg(netdev, "Success End\n"); return 0; err_up: @@ -2072,7 +2110,7 @@ err_setup_rx: pch_gbe_free_tx_resources(adapter, adapter->tx_ring); err_setup_tx: pch_gbe_reset(adapter); - pr_err("Error End\n"); + netdev_err(netdev, "Error End\n"); return err; } @@ -2116,8 +2154,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); spin_unlock_irqrestore(&tx_ring->tx_lock, flags); - pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", - tx_ring->next_to_use, tx_ring->next_to_clean); + netdev_dbg(netdev,
+ "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", + tx_ring->next_to_use, tx_ring->next_to_clean); return NETDEV_TX_BUSY; } @@ -2152,7 +2191,7 @@ static void pch_gbe_set_multi(struct net_device *netdev) int i; int mc_count; - pr_debug("netdev->flags : 0x%08x\n", netdev->flags); + netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); /* Check for Promiscuous and All Multicast modes */ rctl = ioread32(&hw->reg->RX_MODE); @@ -2192,7 +2231,8 @@ static void pch_gbe_set_multi(struct net_device *netdev) PCH_GBE_MAR_ENTRIES); kfree(mta_list); - pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", + netdev_dbg(netdev, + "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", ioread32(&hw->reg->RX_MODE), mc_count); } @@ -2218,12 +2258,12 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr) pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0); ret_val = 0; } - pr_debug("ret_val : 0x%08x\n", ret_val); - pr_debug("dev_addr : %pM\n", netdev->dev_addr); - pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr); - pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n", - ioread32(&adapter->hw.reg->mac_adr[0].high), - ioread32(&adapter->hw.reg->mac_adr[0].low)); + netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val); + netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr); + netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr); + netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n", + ioread32(&adapter->hw.reg->mac_adr[0].high), + ioread32(&adapter->hw.reg->mac_adr[0].low)); return ret_val; } @@ -2245,7 +2285,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) { - pr_err("Invalid MTU setting\n"); + netdev_err(netdev, "Invalid MTU setting\n"); return -EINVAL; } if (max_frame <= PCH_GBE_FRAME_SIZE_2048) @@ -2274,9 +2314,10 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) adapter->hw.mac.max_frame_size = max_frame; } - pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", - max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, - adapter->hw.mac.max_frame_size); + netdev_dbg(netdev, + "max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", + max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, + adapter->hw.mac.max_frame_size); return 0; } @@ -2317,7 +2358,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); - pr_debug("cmd : 0x%04x\n", cmd); + netdev_dbg(netdev, "cmd : 0x%04x\n", cmd); if (cmd == SIOCSHWTSTAMP) return hwtstamp_ioctl(netdev, ifr, cmd); @@ -2354,7 +2395,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) bool poll_end_flag = false; bool cleaned = false; - pr_debug("budget : %d\n", budget); + netdev_dbg(adapter->netdev, "budget : %d\n", budget); pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); @@ -2377,8 +2418,9 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) pch_gbe_enable_dma_rx(&adapter->hw); } - pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", - poll_end_flag, work_done, budget); + netdev_dbg(adapter->netdev, + "poll_end_flag : %d work_done : %d budget : %d\n", + poll_end_flag, work_done, budget); return work_done; } @@ -2435,7 +2477,7 @@ static pci_ers_result_t 
pch_gbe_io_slot_reset(struct pci_dev *pdev) struct pch_gbe_hw *hw = &adapter->hw; if (pci_enable_device(pdev)) { - pr_err("Cannot re-enable PCI device after reset\n"); + netdev_err(netdev, "Cannot re-enable PCI device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); @@ -2455,7 +2497,8 @@ static void pch_gbe_io_resume(struct pci_dev *pdev) if (netif_running(netdev)) { if (pch_gbe_up(adapter)) { - pr_debug("can't bring device back up after reset\n"); + netdev_dbg(netdev, + "can't bring device back up after reset\n"); return; } } @@ -2509,7 +2552,7 @@ static int pch_gbe_resume(struct device *device) err = pci_enable_device(pdev); if (err) { - pr_err("Cannot enable PCI device from suspend\n"); + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); @@ -2545,13 +2588,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) pch_gbe_hal_phy_hw_reset(&adapter->hw); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - - iounmap(adapter->hw.reg); - pci_release_regions(pdev); free_netdev(netdev); - pci_disable_device(pdev); } static int pch_gbe_probe(struct pci_dev *pdev, @@ -2561,7 +2598,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, struct pch_gbe_adapter *adapter; int ret; - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) return ret; @@ -2574,24 +2611,22 @@ static int pch_gbe_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "ERR: No usable DMA " "configuration, aborting\n"); - goto err_disable_device; + return ret; } } } - ret = pci_request_regions(pdev, KBUILD_MODNAME); + ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev)); if (ret) { dev_err(&pdev->dev, "ERR: Can't reserve PCI I/O and memory resources\n"); - goto err_disable_device; + return ret; } pci_set_master(pdev); netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter)); - if (!netdev) { - ret = -ENOMEM; - goto err_release_pci; - } + if (!netdev) + return -ENOMEM; SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -2599,18 +2634,14 @@ static int pch_gbe_probe(struct pci_dev *pdev, adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.back = adapter; - adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0); - if (!adapter->hw.reg) { - ret = -EIO; - dev_err(&pdev->dev, "Can't ioremap\n"); - goto err_free_netdev; - } + adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, PCI_DEVFN(12, 4)); if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { - pr_err("Bad ptp filter\n"); - return -EINVAL; + dev_err(&pdev->dev, "Bad ptp filter\n"); + ret = -EINVAL; + goto err_free_netdev; } netdev->netdev_ops = &pch_gbe_netdev_ops; @@ -2628,7 +2659,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, /* setup the private structure */ ret = pch_gbe_sw_init(adapter); if (ret) - goto err_iounmap; + goto err_free_netdev; /* Initialize PHY */ ret = pch_gbe_init_phy(adapter); @@ -2684,16 +2715,8 @@ static int pch_gbe_probe(struct pci_dev *pdev, err_free_adapter: pch_gbe_hal_phy_hw_reset(&adapter->hw); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); -err_iounmap: - iounmap(adapter->hw.reg); err_free_netdev: free_netdev(netdev); -err_release_pci: - pci_release_regions(pdev); -err_disable_device: - pci_disable_device(pdev); return ret; }
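The probe/remove rework above switches pch_gbe to managed (devres) PCI helpers: once pcim_enable_device() has run, the region request, the BAR mapping and the final device disable are all released automatically when probe fails or the driver is unbound, which is why the err_iounmap/err_release_pci/err_disable_device labels and most of pch_gbe_remove() could be deleted. A condensed sketch of the pattern, assuming BAR 1 as in PCH_GBE_PCI_BAR; demo_probe() is illustrative, not from the patch:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);	/* pci_disable_device() is automatic */
	if (ret)
		return ret;

	/* Requests and ioremaps BAR 1; both are undone on detach or on
	 * probe failure, so no unwind labels are needed below this point.
	 */
	ret = pcim_iomap_regions(pdev, 1 << 1, pci_name(pdev));
	if (ret)
		return ret;

	regs = pcim_iomap_table(pdev)[1];	/* same lookup the patch uses */
	pci_set_master(pdev);
	dev_info(&pdev->dev, "BAR1 mapped at %p\n", regs);
	return 0;
}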
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index 8653c3b81f84..cf7c9b3a255b 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -237,16 +237,17 @@ static int pch_gbe_validate_option(int *value, case enable_option: switch (*value) { case OPTION_ENABLED: - pr_debug("%s Enabled\n", opt->name); + netdev_dbg(adapter->netdev, "%s Enabled\n", opt->name); return 0; case OPTION_DISABLED: - pr_debug("%s Disabled\n", opt->name); + netdev_dbg(adapter->netdev, "%s Disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - pr_debug("%s set to %i\n", opt->name, *value); + netdev_dbg(adapter->netdev, "%s set to %i\n", + opt->name, *value); return 0; } break; @@ -258,7 +259,8 @@ static int pch_gbe_validate_option(int *value, ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') - pr_debug("%s\n", ent->str); + netdev_dbg(adapter->netdev, "%s\n", + ent->str); return 0; } } @@ -268,8 +270,8 @@ static int pch_gbe_validate_option(int *value, BUG(); } - pr_debug("Invalid %s value specified (%i) %s\n", - opt->name, *value, opt->err); + netdev_dbg(adapter->netdev, "Invalid %s value specified (%i) %s\n", - opt->name, *value, opt->err); *value = opt->def; return -1; } @@ -318,7 +320,8 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) .p = an_list} } }; if (speed || dplx) { - pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n"); + netdev_dbg(adapter->netdev, + "AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n"); hw->phy.autoneg_advertised = opt.def; } else { int tmp = AutoNeg; @@ -332,13 +335,16 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) case 0: hw->mac.autoneg = hw->mac.fc_autoneg = 1; if ((speed || dplx)) - pr_debug("Speed and duplex autonegotiation enabled\n"); + netdev_dbg(adapter->netdev, + "Speed and duplex autonegotiation enabled\n"); hw->mac.link_speed = SPEED_10; hw->mac.link_duplex = DUPLEX_HALF; break; case HALF_DUPLEX: - pr_debug("Half Duplex specified without Speed\n"); - pr_debug("Using Autonegotiation at Half Duplex only\n"); + netdev_dbg(adapter->netdev, + "Half Duplex specified without Speed\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at Half Duplex only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | PHY_ADVERTISE_100_HALF; @@ -346,8 +352,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_HALF; break; case FULL_DUPLEX: - pr_debug("Full Duplex specified without Speed\n"); - pr_debug("Using Autonegotiation at Full Duplex only\n"); + netdev_dbg(adapter->netdev, + "Full Duplex specified without Speed\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at Full Duplex only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL | PHY_ADVERTISE_100_FULL | @@ -356,8 +364,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_FULL; break; case SPEED_10: - pr_debug("10 Mbps Speed specified without Duplex\n"); - pr_debug("Using Autonegotiation at 10 Mbps only\n"); + netdev_dbg(adapter->netdev, + "10 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 10 Mbps only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | PHY_ADVERTISE_10_FULL; @@ -365,22 +375,24 @@ static void
pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_10 + HALF_DUPLEX: - pr_debug("Forcing to 10 Mbps Half Duplex\n"); + netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Half Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_10; hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_10 + FULL_DUPLEX: - pr_debug("Forcing to 10 Mbps Full Duplex\n"); + netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Full Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_10; hw->mac.link_duplex = DUPLEX_FULL; break; case SPEED_100: - pr_debug("100 Mbps Speed specified without Duplex\n"); - pr_debug("Using Autonegotiation at 100 Mbps only\n"); + netdev_dbg(adapter->netdev, + "100 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 100 Mbps only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF | PHY_ADVERTISE_100_FULL; @@ -388,28 +400,33 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_100 + HALF_DUPLEX: - pr_debug("Forcing to 100 Mbps Half Duplex\n"); + netdev_dbg(adapter->netdev, + "Forcing to 100 Mbps Half Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_100; hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_100 + FULL_DUPLEX: - pr_debug("Forcing to 100 Mbps Full Duplex\n"); + netdev_dbg(adapter->netdev, + "Forcing to 100 Mbps Full Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_100; hw->mac.link_duplex = DUPLEX_FULL; break; case SPEED_1000: - pr_debug("1000 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "1000 Mbps Speed specified without Duplex\n"); goto full_duplex_only; case SPEED_1000 + HALF_DUPLEX: - pr_debug("Half Duplex is not supported at 1000 Mbps\n"); + netdev_dbg(adapter->netdev, + "Half Duplex is not supported at 1000 Mbps\n"); /* fall through */ case SPEED_1000 + FULL_DUPLEX: full_duplex_only: - pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL; hw->mac.link_speed = SPEED_1000; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index 28bb9603d736..da079073a6c6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -97,6 +97,7 @@ */ s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); struct pch_gbe_phy_info *phy = &hw->phy; s32 ret; u16 phy_id1; @@ -115,8 +116,9 @@ s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw) phy->id = (u32)phy_id1; phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10)); phy->revision = (u32) (phy_id2 & 0x000F); - pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n", - phy->id, phy->revision); + netdev_dbg(adapter->netdev, + "phy->id : 0x%08x phy->revision : 0x%08x\n", + phy->id, phy->revision); return 0; } @@ -134,7 +136,10 @@ s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data) struct pch_gbe_phy_info *phy = &hw->phy; if (offset > PHY_MAX_REG_ADDRESS) 
{ - pr_err("PHY Address %d is out of range\n", offset); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "PHY Address %d is out of range\n", + offset); return -EINVAL; } *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ, @@ -156,7 +161,10 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) struct pch_gbe_phy_info *phy = &hw->phy; if (offset > PHY_MAX_REG_ADDRESS) { - pr_err("PHY Address %d is out of range\n", offset); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "PHY Address %d is out of range\n", + offset); return -EINVAL; } pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE, @@ -235,7 +243,7 @@ void pch_gbe_phy_power_down(struct pch_gbe_hw *hw) * pch_gbe_phy_set_rgmii - RGMII interface setting * @hw: Pointer to the HW structure */ -inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) +void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) { pch_gbe_phy_sw_reset(hw); } @@ -246,15 +254,14 @@ inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) */ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) { - struct pch_gbe_adapter *adapter; + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; int ret; u16 mii_reg; - adapter = container_of(hw, struct pch_gbe_adapter, hw); ret = mii_ethtool_gset(&adapter->mii, &cmd); if (ret) - pr_err("Error: mii_ethtool_gset\n"); + netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n"); ethtool_cmd_speed_set(&cmd, hw->mac.link_speed); cmd.duplex = hw->mac.link_duplex; @@ -263,12 +270,11 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); ret = mii_ethtool_sset(&adapter->mii, &cmd); if (ret) - pr_err("Error: mii_ethtool_sset\n"); + netdev_err(adapter->netdev, "Error: mii_ethtool_sset\n"); pch_gbe_phy_sw_reset(hw); pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); - } diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig index cbbeca3f8c5c..8d5180043c70 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -21,7 +21,6 @@ if NET_PACKET_ENGINE config HAMACHI tristate "Packet Engines Hamachi GNIC-II support" depends on PCI - select NET_CORE select MII ---help--- If you have a Gigabit Ethernet card of this type, say Y and read diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 322a36b76727..3fe09ab2d7c9 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -53,8 +53,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 80 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.80" +#define _NETXEN_NIC_LINUX_SUBVERSION 81 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.81" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) @@ -1855,7 +1855,7 @@ static const struct netxen_brdinfo netxen_boards[] = { #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) -static inline void get_brd_name_by_type(u32 type, char *name) +static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name) { int i, found = 0; for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { @@ -1864,10 +1864,14 @@ static 
inline void get_brd_name_by_type(u32 type, char *name) found = 1; break; } + } + if (!found) { + strcpy(name, "Unknown"); + return -EINVAL; } - if (!found) - name = "Unknown"; + + return 0; } static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h index 28e076960bcb..32c790659f9c 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h @@ -734,6 +734,9 @@ enum { #define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) #define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) #define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) +#define NETXEN_INTR_MODE_REG NETXEN_NIC_REG(0x44) +#define NETXEN_MSI_MODE 0x1 +#define NETXEN_INTX_MODE 0x2 #define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) #define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c))
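The three NETXEN_INTR_MODE_REG defines just added are the heart of the netxen_setup_intr() rework in the netxen_nic_main.c hunks that follow: PCI function 0 probes MSI-X/MSI first and publishes the winning mode in that shared register, and every other function on the card reads the register and must follow suit, because the firmware cannot service MSI-family and INTx functions at the same time. A condensed restatement of that handshake, using only the helpers the patch itself introduces (error paths trimmed):

static int demo_choose_intr_mode(struct netxen_adapter *adapter,
				 struct pci_dev *pdev, int num_msix)
{
	if (netxen_function_zero(pdev)) {
		/* Function 0 decides, then publishes the result in the
		 * shared NETXEN_INTR_MODE_REG for the other functions.
		 */
		if (!netxen_setup_msi_interrupts(adapter, num_msix))
			netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
		else
			netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
		return 0;
	}

	/* Later functions read the published mode; they may not drop back
	 * to INTx while function 0 runs MSI-family interrupts.
	 */
	if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
	    netxen_setup_msi_interrupts(adapter, num_msix))
		return -EIO;	/* mixed interrupt modes are unsupported */

	return 0;
}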
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index af951f343ff6..c401b0b4353d 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -592,48 +592,60 @@ static const struct net_device_ops netxen_netdev_ops = { #endif }; -static void -netxen_setup_intr(struct netxen_adapter *adapter) +static inline bool netxen_function_zero(struct pci_dev *pdev) { - struct netxen_legacy_intr_set *legacy_intrp; - struct pci_dev *pdev = adapter->pdev; - int err, num_msix; + return (PCI_FUNC(pdev->devfn) == 0) ? true : false; +} - if (adapter->rss_supported) { - num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? - MSIX_ENTRIES_PER_ADAPTER : 2; - } else - num_msix = 1; +static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, + u32 mode) +{ + NXWR32(adapter, NETXEN_INTR_MODE_REG, mode); +} - adapter->max_sds_rings = 1; +static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter) +{ + return NXRD32(adapter, NETXEN_INTR_MODE_REG); +} - adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); +static void +netxen_initialize_interrupt_registers(struct netxen_adapter *adapter) +{ + struct netxen_legacy_intr_set *legacy_intrp; + u32 tgt_status_reg, int_state_reg; if (adapter->ahw.revision_id >= NX_P3_B0) legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; else legacy_intrp = &legacy_intr[0]; + tgt_status_reg = legacy_intrp->tgt_status_reg; + int_state_reg = ISR_INT_STATE_REG; + adapter->int_vec_bit = legacy_intrp->int_vec_bit; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_status_reg); + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg); adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_mask_reg); + legacy_intrp->tgt_mask_reg); adapter->pci_int_reg = netxen_get_ioaddr(adapter, - legacy_intrp->pci_int_reg); + legacy_intrp->pci_int_reg); adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); if (adapter->ahw.revision_id >= NX_P3_B1) adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - ISR_INT_STATE_REG); + int_state_reg); else adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - CRB_INT_VECTOR); + CRB_INT_VECTOR); +} - netxen_set_msix_bit(pdev, 0); +static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, + int num_msix) +{ + struct pci_dev *pdev = adapter->pdev; + u32 value; + int err; if (adapter->msix_supported) { - netxen_init_msix_entries(adapter, num_msix); err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); if (err == 0) { @@ -644,26 +656,59 @@ netxen_setup_intr(struct netxen_adapter *adapter) adapter->max_sds_rings = num_msix; dev_info(&pdev->dev, "using msi-x interrupts\n"); - return; + return 0; } - - if (err > 0) - pci_disable_msix(pdev); - /* fall through for msi */ } if (use_msi && !pci_enable_msi(pdev)) { + value = msi_tgt_status[adapter->ahw.pci_func]; adapter->flags |= NETXEN_NIC_MSI_ENABLED; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - msi_tgt_status[adapter->ahw.pci_func]); - dev_info(&pdev->dev, "using msi interrupts\n"); + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value); adapter->msix_entries[0].vector = pdev->irq; - return; + dev_info(&pdev->dev, "using msi interrupts\n"); + return 0; } - dev_info(&pdev->dev, "using legacy interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; + dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n"); + return -EIO; +} + +static int netxen_setup_intr(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int num_msix; + + if (adapter->rss_supported) + num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? + MSIX_ENTRIES_PER_ADAPTER : 2; + else + num_msix = 1; + + adapter->max_sds_rings = 1; + adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); + + netxen_initialize_interrupt_registers(adapter); + netxen_set_msix_bit(pdev, 0); + + if (netxen_function_zero(pdev)) { + if (!netxen_setup_msi_interrupts(adapter, num_msix)) + netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); + else + netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE); + } else { + if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE && + netxen_setup_msi_interrupts(adapter, num_msix)) { + dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n"); + return -EIO; + } + } + + if (!NETXEN_IS_MSI_FAMILY(adapter)) { + adapter->msix_entries[0].vector = pdev->irq; + dev_info(&pdev->dev, "using legacy interrupts\n"); + } + return 0; } static void @@ -841,7 +886,9 @@ netxen_check_options(struct netxen_adapter *adapter) } if (adapter->portnum == 0) { - get_brd_name_by_type(adapter->ahw.board_type, brd_name); + if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type, + brd_name)) + strcpy(serial_num, "Unknown"); pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", module_name(THIS_MODULE),
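The get_brd_name_by_type() fix shown earlier in this netxen section corrects a classic C slip: the old `name = "Unknown"` only rebound the callee's local copy of the pointer, so the caller's buffer was never written; the replacement strcpy(name, "Unknown") writes through the pointer, and the new int return lets callers such as netxen_check_options() above react to the miss. A standalone demonstration of the difference (demo code, not from the driver):

#include <stdio.h>
#include <string.h>

static void fill_buggy(char *name)
{
	name = "Unknown";	/* rebinds the parameter only; caller unchanged */
}

static void fill_fixed(char *name)
{
	strcpy(name, "Unknown");	/* writes into the caller's buffer */
}

int main(void)
{
	char brd_name[32] = "uninitialised";

	fill_buggy(brd_name);
	printf("%s\n", brd_name);	/* still "uninitialised" */

	fill_fixed(brd_name);
	printf("%s\n", brd_name);	/* "Unknown" */
	return 0;
}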
"cut-through" : "legacy"); if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); @@ -1508,7 +1555,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netxen_nic_clear_stats(adapter); - netxen_setup_intr(adapter); + err = netxen_setup_intr(adapter); + + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to setup interrupts, error = %d\n", err); + goto err_out_disable_msi; + } err = netxen_setup_netdev(adapter, netdev); if (err) @@ -1596,7 +1649,7 @@ static void netxen_nic_remove(struct pci_dev *pdev) clear_bit(__NX_RESETTING, &adapter->state); netxen_teardown_intr(adapter); - + netxen_set_interrupt_mode(adapter, 0); netxen_remove_diag_entries(adapter); netxen_cleanup_pci_map(adapter); @@ -2721,7 +2774,7 @@ netxen_store_bridged_mode(struct device *dev, if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) goto err_out; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) goto err_out; if (!netxen_config_bridged_mode(adapter, !!new)) @@ -2760,7 +2813,7 @@ netxen_store_diag_mode(struct device *dev, struct netxen_adapter *adapter = dev_get_drvdata(dev); unsigned long new; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) return -EINVAL; if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) @@ -3311,7 +3364,7 @@ static int netxen_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_device *orig_dev = dev; struct net_device *slave; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index c1b693cb3df3..b00cf5665eab 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 2 -#define _QLCNIC_LINUX_SUBVERSION 42 -#define QLCNIC_LINUX_VERSIONID "5.2.42" +#define _QLCNIC_LINUX_SUBVERSION 44 +#define QLCNIC_LINUX_VERSIONID "5.2.44" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -303,7 +303,6 @@ extern int qlcnic_use_msi; extern int qlcnic_use_msi_x; extern int qlcnic_auto_fw_reset; extern int qlcnic_load_fw_file; -extern int qlcnic_config_npars; /* Number of status descriptors to handle per interrupt */ #define MAX_STATUS_HANDLE (64) @@ -394,6 +393,9 @@ struct qlcnic_fw_dump { u32 size; /* total size of the dump */ void *data; /* dump data area */ struct qlcnic_dump_template_hdr *tmpl_hdr; + dma_addr_t phys_addr; + void *dma_buffer; + bool use_pex_dma; }; /* @@ -427,6 +429,7 @@ struct qlcnic_hardware_context { u8 nic_mode; char diag_cnt; + u16 max_uc_count; u16 port_type; u16 board_type; u16 supported_type; @@ -443,9 +446,10 @@ struct qlcnic_hardware_context { u16 max_mtu; u32 msg_enable; u16 act_pci_func; + u16 max_pci_func; u32 capabilities; - u32 capabilities2; + u32 extra_capability[3]; u32 temp; u32 int_vec_bit; u32 fw_hal_version; @@ -815,7 +819,8 @@ struct qlcnic_mac_list_s { #define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2 #define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 -#define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5 +#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 +#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 @@ -913,6 +918,9 @@ struct qlcnic_ipaddr { #define 
QLCNIC_IS_TSO_CAPABLE(adapter) \ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) +#define QLCNIC_BEACON_EANBLE 0xC +#define QLCNIC_BEACON_DISABLE 0xD + #define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 #define QLCNIC_MSIX_TBL_SPACE 8192 #define QLCNIC_PCI_REG_MSIX_TBL 0x44 @@ -932,6 +940,7 @@ struct qlcnic_ipaddr { #define __QLCNIC_SRIOV_ENABLE 10 #define __QLCNIC_SRIOV_CAPABLE 11 #define __QLCNIC_MBX_POLL_ENABLE 12 +#define __QLCNIC_DIAG_MODE 13 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -1467,7 +1476,7 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); -int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *); +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32); int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features); @@ -1489,7 +1498,9 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); +void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); int qlcnic_enable_msix(struct qlcnic_adapter *, u32); +void qlcnic_set_drv_version(struct qlcnic_adapter *); /* eSwitch management functions */ int qlcnic_config_switch_port(struct qlcnic_adapter *, @@ -1543,6 +1554,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *); int qlcnic_reset_npar_config(struct qlcnic_adapter *); int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16); +int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); @@ -1584,6 +1596,8 @@ struct qlcnic_nic_template { void (*napi_del)(struct qlcnic_adapter *); void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int); irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *); + int (*shutdown)(struct pci_dev *); + int (*resume)(struct qlcnic_adapter *); }; /* Adapter hardware abstraction */ @@ -1625,6 +1639,7 @@ struct qlcnic_hardware_ops { int (*config_promisc_mode) (struct qlcnic_adapter *, u32); void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16); int (*get_board_info) (struct qlcnic_adapter *); + void (*set_mac_filter_count) (struct qlcnic_adapter *); void (*free_mac_list) (struct qlcnic_adapter *); }; @@ -1787,6 +1802,18 @@ static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter) adapter->ahw->hw_ops->napi_enable(adapter); } +static inline int __qlcnic_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + return adapter->nic_ops->shutdown(pdev); +} + +static inline int __qlcnic_resume(struct qlcnic_adapter *adapter) +{ + return adapter->nic_ops->resume(adapter); +} + static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter) { adapter->ahw->hw_ops->napi_disable(adapter); @@ -1840,6 +1867,11 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) return adapter->ahw->hw_ops->free_mac_list(adapter); } +static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + 
adapter->ahw->hw_ops->set_mac_filter_count(adapter); +} + static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) { @@ -1886,6 +1918,21 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) writel(0xfbff, adapter->tgt_mask_reg); } +static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter) +{ + return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + +static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter) +{ + clear_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + +static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter) +{ + return test_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops; extern const struct ethtool_ops qlcnic_ethtool_ops; extern const struct ethtool_ops qlcnic_ethtool_failed_ops; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b4ff1e35a11d..0913c623a67e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -63,6 +63,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, + {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1}, {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, @@ -172,6 +173,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .config_promisc_mode = qlcnic_83xx_nic_set_promisc, .change_l2_filter = qlcnic_83xx_change_l2_filter, .get_board_info = qlcnic_83xx_get_port_info, + .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, }; @@ -184,6 +186,8 @@ static struct qlcnic_nic_template qlcnic_83xx_ops = { .napi_del = qlcnic_83xx_napi_del, .config_ipaddr = qlcnic_83xx_config_ipaddr, .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, + .shutdown = qlcnic_83xx_shutdown, + .resume = qlcnic_83xx_resume, }; void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw) @@ -312,6 +316,11 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter) writel(0, adapter->tgt_mask_reg); } +inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter) +{ + writel(1, adapter->tgt_mask_reg); +} + /* Enable MSI-x and INT-x interrupts */ void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring) @@ -458,6 +467,9 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter) { u32 num_msix; + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) + qlcnic_83xx_set_legacy_intr_mask(adapter); + qlcnic_83xx_disable_mbx_intr(adapter); if (adapter->flags & QLCNIC_MSIX_ENABLED) @@ -474,7 +486,6 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) { irq_handler_t handler; u32 val; - char name[32]; int err = 0; unsigned long flags = 0; @@ -485,9 +496,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) if (adapter->flags & QLCNIC_MSIX_ENABLED) { handler = qlcnic_83xx_handle_aen; val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector; - snprintf(name, (IFNAMSIZ + 4), - "%s[%s]", "qlcnic", "aen"); - err = request_irq(val, handler, flags, name, adapter); + err = request_irq(val, handler, flags, "qlcnic-MB", adapter); if (err) { dev_err(&adapter->pdev->dev, "failed to register MBX interrupt\n");
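The qlcnic_get_diag_lock()/qlcnic_release_diag_lock() inlines added to qlcnic.h above replace the earlier overloading of __QLCNIC_RESETTING with a dedicated single-owner lock built from one atomic bit: test_and_set_bit() returns the previous bit value, so a non-zero result means diagnostics mode is already owned and the caller backs off with -EBUSY instead of blocking. The generic shape of the idiom, with illustrative names:

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_DIAG_BIT	0

static unsigned long demo_state;

static int demo_diag_trylock(void)
{
	/* Atomically sets the bit and returns its old value:
	 * non-zero means another path got there first.
	 */
	if (test_and_set_bit(DEMO_DIAG_BIT, &demo_state))
		return -EBUSY;
	return 0;
}

static void demo_diag_unlock(void)
{
	clear_bit(DEMO_DIAG_BIT, &demo_state);
}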
@@ -604,6 +613,22 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) return status; } +void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u16 act_pci_fn = ahw->act_pci_func; + u16 count; + + ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT; + if (act_pci_fn <= 2) + count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) / + act_pci_fn; + else + count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) / + act_pci_fn; + ahw->max_uc_count = count; +} + void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter) { u32 val; @@ -839,7 +864,9 @@ void qlcnic_83xx_idc_aen_work(struct work_struct *work) int i, err = 0; adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work); - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); + if (err) + return; for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++) cmd.req.arg[i] = adapter->ahw->mbox_aen[i]; @@ -1080,8 +1107,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) cap |= QLC_83XX_FW_CAP_LRO_MSS; /* set mailbox hdr and capabilities */ - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_CREATE_RX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CREATE_RX_CTX); + if (err) + return err; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); @@ -1239,7 +1268,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, mbx.intr_id = 0xffff; mbx.src = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + if (err) + return err; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); @@ -1385,8 +1416,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, if (state) { /* Get LED configuration */ - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_GET_LED_CONFIG); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_LED_CONFIG); + if (status) + return status; + status = qlcnic_issue_cmd(adapter, &cmd); if (status) { dev_err(&adapter->pdev->dev, @@ -1400,8 +1434,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, /* Set LED Configuration */ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) | LSW(QLC_83XX_LED_CONFIG); - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_SET_LED_CONFIG); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + if (status) + return status; + cmd.req.arg[1] = mbx_in; cmd.req.arg[2] = mbx_in; cmd.req.arg[3] = mbx_in; @@ -1418,8 +1455,11 @@ mbx_err: } else { /* Restoring default LED configuration */ - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_SET_LED_CONFIG); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + if (status) + return status; + cmd.req.arg[1] = adapter->ahw->mbox_reg[0]; cmd.req.arg[2] = adapter->ahw->mbox_reg[1]; cmd.req.arg[3] = adapter->ahw->mbox_reg[2]; @@ -1489,10 +1529,18 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, return; if (enable) { - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_INIT_NIC_FUNC); + if (status) + return; + cmd.req.arg[1] = BIT_0 | BIT_31; } else { - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_STOP_NIC_FUNC); + if (status) + return; + cmd.req.arg[1] = BIT_0 | BIT_31; } status =
qlcnic_issue_cmd(adapter, &cmd); @@ -1509,7 +1557,10 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter) struct qlcnic_cmd_args cmd; int err; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); + if (err) + return err; + cmd.req.arg[1] = adapter->ahw->port_config; err = qlcnic_issue_cmd(adapter, &cmd); if (err) @@ -1523,7 +1574,10 @@ int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter) struct qlcnic_cmd_args cmd; int err; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); + if (err) + return err; + err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "Get Port config failed\n"); @@ -1539,7 +1593,10 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable) u32 temp; struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); + if (err) + return err; + temp = adapter->recv_ctx->context_id << 16; cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp; err = qlcnic_issue_cmd(adapter, &cmd); @@ -1570,7 +1627,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); + if (err) + return err; + qlcnic_83xx_set_interface_id_promisc(adapter, &temp); cmd.req.arg[1] = (mode ? 1 : 0) | temp; err = qlcnic_issue_cmd(adapter, &cmd); @@ -1588,16 +1649,24 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) struct qlcnic_hardware_context *ahw = adapter->ahw; int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings; - QLCDB(adapter, DRV, "%s loopback test in progress\n", - mode == QLCNIC_ILB_MODE ? "internal" : "external"); if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { - dev_warn(&adapter->pdev->dev, - "Loopback test not supported for non privilege function\n"); + netdev_warn(netdev, + "Loopback test not supported in non privileged mode\n"); return ret; } - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, "Device is resetting\n"); return -EBUSY; + } + + if (qlcnic_get_diag_lock(adapter)) { + netdev_info(netdev, "Device is in diagnostics mode\n"); + return -EBUSY; + } + + netdev_info(netdev, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, max_sds_rings); @@ -1610,13 +1679,19 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) /* Poll for link up event before running traffic */ do { - msleep(500); + msleep(QLC_83XX_LB_MSLEEP_COUNT); if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) qlcnic_83xx_process_aen(adapter); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - dev_info(&adapter->pdev->dev, - "Firmware didn't sent link up event to loopback request\n"); + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + ret = -EIO; + goto free_diag_res; + } + if (loop++ > QLC_83XX_LB_WAIT_COUNT) { + netdev_info(netdev, + "Firmware didn't sent link up event to loopback request\n"); ret = -QLCNIC_FW_NOT_RESPOND; qlcnic_83xx_clear_lb_mode(adapter, mode); goto free_diag_res; @@ -1638,13 +1713,14 @@ free_diag_res: fail_diag_alloc: adapter->max_sds_rings = max_sds_rings; - clear_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_release_diag_lock(adapter); return ret; } int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; int status = 0, loop = 0; u32 config; @@ -1662,9 +1738,9 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) status = qlcnic_83xx_set_port_config(adapter); if (status) { - dev_err(&adapter->pdev->dev, - "Failed to Set Loopback Mode = 0x%x.\n", - ahw->port_config); + netdev_err(netdev, + "Failed to Set Loopback Mode = 0x%x.\n", + ahw->port_config); ahw->port_config = config; clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return status; @@ -1672,13 +1748,19 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) /* Wait for Link and IDC Completion AEN */ do { - msleep(300); + msleep(QLC_83XX_LB_MSLEEP_COUNT); if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) qlcnic_83xx_process_aen(adapter); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - dev_err(&adapter->pdev->dev, - "FW did not generate IDC completion AEN\n"); + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -EIO; + } + if (loop++ > QLC_83XX_LB_WAIT_COUNT) { + netdev_err(netdev, + "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); qlcnic_83xx_clear_lb_mode(adapter, mode); return -EIO; @@ -1693,6 +1775,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; int status = 0, loop = 0; u32 config = ahw->port_config; @@ -1704,9 +1787,9 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) status = qlcnic_83xx_set_port_config(adapter); if (status) { - dev_err(&adapter->pdev->dev, - "Failed to Clear Loopback Mode = 0x%x.\n", - ahw->port_config); + netdev_err(netdev, + "Failed to Clear Loopback Mode = 0x%x.\n", + ahw->port_config); ahw->port_config = config; clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return status; @@ -1714,13 +1797,20 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) /* Wait for Link and IDC Completion AEN */ do { - msleep(300); + msleep(QLC_83XX_LB_MSLEEP_COUNT); if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 
qlcnic_83xx_process_aen(adapter); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - dev_err(&adapter->pdev->dev, - "Firmware didn't sent IDC completion AEN\n"); + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -EIO; + } + + if (loop++ > QLC_83XX_LB_WAIT_COUNT) { + netdev_err(netdev, + "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return -EIO; } @@ -1749,7 +1839,11 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, u32 temp = 0, temp_ip; struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_IP_ADDR); + if (err) + return; + qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp); if (mode == QLCNIC_IP_UP) @@ -1788,7 +1882,10 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode) if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); + if (err) + return err; + temp = adapter->recv_ctx->context_id << 16; arg1 = lro_bit_mask | temp; cmd.req.arg[1] = arg1; @@ -1810,8 +1907,9 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); - + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); + if (err) + return err; /* * RSS request: * bits 3-0: Rsvd @@ -1917,7 +2015,10 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + if (err) + return err; + qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd); err = qlcnic_issue_cmd(adapter, &cmd); @@ -1948,7 +2049,10 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter) if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); + if (err) + return; + if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) { temp = adapter->recv_ctx->context_id; cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16; @@ -2020,7 +2124,10 @@ int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable) return err; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH); + if (err) + return err; + cmd.req.arg[1] = (port & 0xf) | BIT_4; err = qlcnic_issue_cmd(adapter, &cmd); @@ -2048,7 +2155,10 @@ int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter, return err; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + return err; + cmd.req.arg[1] = (nic->pci_func << 16); cmd.req.arg[2] = 0x1 << 16; cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
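Much of the qlcnic_83xx_hw.c churn above is one fix applied many times over: qlcnic_alloc_mbx_args() can fail, and the old code ignored its return value and went on to issue mailbox commands with uninitialised request buffers. The corrected lifecycle, condensed into one hypothetical caller built from the helpers and command name used in these hunks:

static int demo_get_link_status(struct qlcnic_adapter *adapter)
{
	struct qlcnic_cmd_args cmd;
	int err;

	/* May fail (e.g. allocation error); issuing the command anyway
	 * would hand the firmware uninitialised arguments.
	 */
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
	if (err)
		return err;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev, "mailbox command failed\n");

	qlcnic_free_mbx_args(&cmd);	/* release the args on every path */
	return err;
}

The retuned loopback polling loops nearby follow the same defensive style; with the new constants defined in qlcnic_83xx_hw.h below (QLC_83XX_LB_WAIT_COUNT = 250, QLC_83XX_LB_MSLEEP_COUNT = 20), they give the firmware roughly 250 x 20 ms = 5 seconds to deliver the link/IDC AEN before the test bails out.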
@@ -2079,13 +2189,17 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, u32 temp; u8 op = 0; struct qlcnic_cmd_args cmd; + struct qlcnic_hardware_context *ahw = adapter->ahw; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); - if (func_id != adapter->ahw->pci_func) { + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (err) + return err; + + if (func_id != ahw->pci_func) { temp = func_id << 16; cmd.req.arg[1] = op | BIT_31 | temp; } else { - cmd.req.arg[1] = adapter->ahw->pci_func << 16; + cmd.req.arg[1] = ahw->pci_func << 16; } err = qlcnic_issue_cmd(adapter, &cmd); if (err) { @@ -2112,6 +2226,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; npar_info->max_linkspeed_reg_offset = temp; } + if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) + memcpy(ahw->extra_capability, &cmd.rsp.arg[16], + sizeof(ahw->extra_capability)); out: qlcnic_free_mbx_args(&cmd); @@ -2121,26 +2238,28 @@ out: int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, struct qlcnic_pci_info *pci_info) { + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; int i, err = 0, j = 0; u32 temp; - struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + if (err) + return err; + err = qlcnic_issue_cmd(adapter, &cmd); - adapter->ahw->act_pci_func = 0; + ahw->act_pci_func = 0; if (err == QLCNIC_RCODE_SUCCESS) { - pci_info->func_count = cmd.rsp.arg[1] & 0xFF; - dev_info(&adapter->pdev->dev, - "%s: total functions = %d\n", - __func__, pci_info->func_count); + ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF; for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) { pci_info->id = cmd.rsp.arg[i] & 0xFFFF; pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; i++; pci_info->type = cmd.rsp.arg[i] & 0xFFFF; if (pci_info->type == QLCNIC_TYPE_NIC) - adapter->ahw->act_pci_func++; + ahw->act_pci_func++; temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; pci_info->default_port = temp; i++; @@ -2152,18 +2271,21 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, i++; memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); i = i + 3; - - dev_info(&adapter->pdev->dev, "%s:\n" - "\tid = %d active = %d type = %d\n" - "\tport = %d min bw = %d max bw = %d\n" - "\tmac_addr = %pM\n", __func__, - pci_info->id, pci_info->active, pci_info->type, - pci_info->default_port, pci_info->tx_min_bw, - pci_info->tx_max_bw, pci_info->mac); + if (ahw->op_mode == QLCNIC_MGMT_FUNC) + dev_info(dev, "id = %d active = %d type = %d\n" + "\tport = %d min bw = %d max bw = %d\n" + "\tmac_addr = %pM\n", pci_info->id, + pci_info->active, pci_info->type, + pci_info->default_port, + pci_info->tx_min_bw, + pci_info->tx_max_bw, pci_info->mac); } + if (ahw->op_mode == QLCNIC_MGMT_FUNC) + dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n", + ahw->max_pci_func, ahw->act_pci_func); + } else { - dev_err(&adapter->pdev->dev, "Failed to get PCI Info%d\n", - err); + dev_err(dev, "Failed to get PCI Info, error = %d\n", err); err = -EIO; } @@ -2180,7 +2302,10 @@ int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) struct qlcnic_cmd_args cmd; max_ints = adapter->ahw->num_msix - 1; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); + if (err) + return err; + cmd.req.arg[1] = max_ints; if (qlcnic_sriov_vf_check(adapter)) @@ -2808,7 +2933,11 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) dev_info(&adapter->pdev->dev, "link
state down\n"); return config; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); + if (err) + return err; + err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_info(&adapter->pdev->dev, @@ -3034,7 +3163,9 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) struct net_device *netdev = adapter->netdev; int ret = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); + if (ret) + return; /* Get Tx stats */ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16); cmd.rsp.num = QLC_83XX_TX_STAT_REGS; @@ -3113,8 +3244,10 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) u8 val; int ret, max_sds_rings = adapter->max_sds_rings; - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EIO; + if (qlcnic_get_diag_lock(adapter)) { + netdev_info(netdev, "Device in diagnostics mode\n"); + return -EBUSY; + } ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, max_sds_rings); @@ -3122,7 +3255,9 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) goto fail_diag_irq; ahw->diag_cnt = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) + goto fail_diag_irq; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; @@ -3156,7 +3291,7 @@ done: fail_diag_irq: adapter->max_sds_rings = max_sds_rings; - clear_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_release_diag_lock(adapter); return ret; } @@ -3260,3 +3395,54 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) } return 0; } + +int qlcnic_83xx_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_83xx_disable_mbx_intr(adapter); + cancel_delayed_work_sync(&adapter->idc_aen_work); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + return 0; +} + +int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + int err = 0; + + err = qlcnic_83xx_idc_init(adapter); + if (err) + return err; + + if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) { + if (ahw->op_mode == QLCNIC_MGMT_FUNC) { + qlcnic_83xx_set_vnic_opmode(adapter); + } else { + err = qlcnic_83xx_check_vnic_state(adapter); + if (err) + return err; + } + } + + err = qlcnic_83xx_idc_reattach_driver(adapter); + if (err) + return err; + + qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, + idc->delay); + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index f5db67fc9f55..2548d1403d75 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -36,7 +36,8 @@ #define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3 #define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200 #define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3 - +#define QLC_83XX_LB_WAIT_COUNT 250 +#define QLC_83XX_LB_MSLEEP_COUNT 20 #define QLC_83XX_NO_NIC_RESOURCE 0x5 #define QLC_83XX_MAC_PRESENT 0xC #define QLC_83XX_MAC_ABSENT 0xD @@ -314,6 +315,7 @@ struct qlc_83xx_idc { u8 vnic_state; u8 
vnic_wait_limit; u8 quiesce_req; + u8 delay_reset; char **name; }; @@ -392,6 +394,8 @@ enum qlcnic_83xx_states { #define QLC_83XX_LB_MAX_FILTERS 2048 #define QLC_83XX_LB_BUCKET_SIZE 256 #define QLC_83XX_MINIMUM_VECTOR 3 +#define QLC_83XX_MAX_MC_COUNT 38 +#define QLC_83XX_MAX_UC_COUNT 4096 #define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) #define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) @@ -623,4 +627,11 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); +void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *); +int qlcnic_83xx_shutdown(struct pci_dev *); +int qlcnic_83xx_resume(struct qlcnic_adapter *); +int qlcnic_83xx_idc_init(struct qlcnic_adapter *); +int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *); +int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *); +int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 5e7fb1dfb97b..f41dfab1e9a3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -606,7 +606,7 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) +int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) { int err; @@ -629,6 +629,7 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) return -EIO; } + qlcnic_set_drv_version(adapter); qlcnic_83xx_idc_attach_driver(adapter); return 0; @@ -649,6 +650,7 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) ahw->idc.collect_dump = 0; ahw->reset_context = 0; adapter->tx_timeo_cnt = 0; + ahw->idc.delay_reset = 0; clear_bit(__QLCNIC_RESETTING, &adapter->state); } @@ -883,21 +885,41 @@ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) int ret = 0; if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { - qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(__QLCNIC_RESETTING, &adapter->state); clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); - qlcnic_83xx_idc_detach_driver(adapter); + + if (qlcnic_check_diag_status(adapter)) { + dev_info(&adapter->pdev->dev, + "%s: Wait for diag completion\n", __func__); + adapter->ahw->idc.delay_reset = 1; + return 0; + } else { + qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); + qlcnic_83xx_idc_detach_driver(adapter); + } } - /* Check ACK from other functions */ - ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter); - if (ret) { + if (qlcnic_check_diag_status(adapter)) { dev_info(&adapter->pdev->dev, - "%s: Waiting for reset ACK\n", __func__); - return 0; + "%s: Wait for diag completion\n", __func__); + return -1; + } else { + if (adapter->ahw->idc.delay_reset) { + qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); + qlcnic_83xx_idc_detach_driver(adapter); + adapter->ahw->idc.delay_reset = 0; + } + + /* Check for ACK from other functions */ + ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter); + if (ret) { + dev_info(&adapter->pdev->dev, + "%s: Waiting for reset ACK\n", __func__); + return -1; 
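
(Aside on the hunk above.) The change makes qlcnic_83xx_idc_need_reset_state() defer a firmware reset while a diagnostic test owns the device: it sets idc.delay_reset, skips the driver detach and reset ACK, and returns -1 so the state is polled again; once qlcnic_check_diag_status() reports the test finished, the deferred detach and ACK run. A minimal sketch of the matching test-side bracket, modelled on qlcnic_83xx_interrupt_test() in this patch (qlcnic_get_diag_lock() and qlcnic_release_diag_lock() are defined outside these hunks, and example_run_test() is a hypothetical stand-in):

	static int example_diag_test(struct net_device *netdev)
	{
		struct qlcnic_adapter *adapter = netdev_priv(netdev);
		int ret;

		/* Take the diag lock; a concurrent IDC reset will now wait. */
		if (qlcnic_get_diag_lock(adapter)) {
			netdev_info(netdev, "Device in diagnostics mode\n");
			return -EBUSY;
		}

		ret = example_run_test(adapter);	/* hypothetical test body */

		/* Dropping the lock lets a deferred NEED_RESET transition proceed. */
		qlcnic_release_diag_lock(adapter);
		return ret;
	}
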
+ } } /* Transit to INIT state and restart the HW */ @@ -1113,7 +1135,7 @@ qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) +int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) { int ret = -EIO; @@ -1532,9 +1554,18 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev) int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) { - u8 *p_buff; - u32 addr, count; struct qlcnic_hardware_context *ahw = p_dev->ahw; + u32 addr, count, prev_ver, curr_ver; + u8 *p_buff; + + if (ahw->reset.buff != NULL) { + prev_ver = p_dev->fw_version; + curr_ver = qlcnic_83xx_get_fw_version(p_dev); + if (curr_ver > prev_ver) + kfree(ahw->reset.buff); + else + return 0; + } ahw->reset.seq_error = 0; ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); @@ -2062,7 +2093,11 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) { - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_STOP_NIC_FUNC); + if (status) + return; + cmd.req.arg[1] = BIT_31; status = qlcnic_issue_cmd(adapter, &cmd); if (status) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index b0c3de9ede03..599d1fda52f2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -39,30 +39,21 @@ int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock) return 0; } -static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) +int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) { u8 id; - int i, ret = -EBUSY; + int ret = -EBUSY; u32 data = QLCNIC_MGMT_FUNC; struct qlcnic_hardware_context *ahw = adapter->ahw; if (qlcnic_83xx_lock_driver(adapter)) return ret; - if (qlcnic_config_npars) { - for (i = 0; i < ahw->act_pci_func; i++) { - id = adapter->npars[i].pci_func; - if (id == ahw->pci_func) - continue; - data |= qlcnic_config_npars & - QLC_83XX_SET_FUNC_OPMODE(0x3, id); - } - } else { - data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); - data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) | - QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, - ahw->pci_func); - } + id = ahw->pci_func; + data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) | + QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id); + QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data); qlcnic_83xx_unlock_driver(adapter); @@ -196,20 +187,24 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) else priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, ahw->pci_func); - - if (priv_level == QLCNIC_NON_PRIV_FUNC) { + switch (priv_level) { + case QLCNIC_NON_PRIV_FUNC: ahw->op_mode = QLCNIC_NON_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; - } else if (priv_level == QLCNIC_PRIV_FUNC) { + break; + case QLCNIC_PRIV_FUNC: ahw->op_mode = QLCNIC_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; - } else if (priv_level == QLCNIC_MGMT_FUNC) { + break; + case QLCNIC_MGMT_FUNC: ahw->op_mode = QLCNIC_MGMT_FUNC; ahw->idc.state_entry = 
qlcnic_83xx_idc_ready_state_entry; nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; - } else { + break; + default: + dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n"); return -EIO; } @@ -218,8 +213,29 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) else adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; - adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; - adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; + ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; + ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; + + return 0; +} + +int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + u32 state; + + state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); + while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { + msleep(1000); + state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); + } + + if (!idc->vnic_wait_limit) { + dev_err(&adapter->pdev->dev, + "vNIC mode not operational, state check timed out.\n"); + return -EIO; + } return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 6acf82b9f018..0581a484ceb5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -36,7 +36,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_CONFIG_PORT, 4, 1}, {QLCNIC_CMD_TEMP_SIZE, 4, 4}, {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, - {QLCNIC_CMD_SET_DRV_VER, 4, 1}, + {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, + {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, }; static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) @@ -181,7 +182,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, return cmd->rsp.arg[0]; } -int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd) { struct qlcnic_cmd_args cmd; u32 arg1, arg2, arg3; @@ -193,7 +194,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, _QLCNIC_LINUX_SUBVERSION); - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER); + err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd); + if (err) + return err; + memcpy(&arg1, drv_string, sizeof(u32)); memcpy(&arg2, drv_string + 4, sizeof(u32)); memcpy(&arg3, drv_string + 8, sizeof(u32)); @@ -221,7 +225,10 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE) return err; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); + if (err) + return err; + cmd.req.arg[1] = recv_ctx->context_id; cmd.req.arg[2] = mtu; @@ -335,7 +342,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) } phys_addr = hostrq_phys_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + if (err) + goto out_free_rsp; + cmd.req.arg[1] = MSD(phys_addr); cmd.req.arg[2] = LSD(phys_addr); cmd.req.arg[3] = rq_size; @@ -373,10 +383,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) recv_ctx->context_id = le16_to_cpu(prsp->context_id); recv_ctx->virt_port = prsp->virt_port; + qlcnic_free_mbx_args(&cmd); out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, - cardrsp_phys_addr); - qlcnic_free_mbx_args(&cmd); + 
cardrsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); return err; @@ -388,7 +398,10 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter) struct qlcnic_cmd_args cmd; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); + if (err) + return; + cmd.req.arg[1] = recv_ctx->context_id; err = qlcnic_issue_cmd(adapter, &cmd); if (err) @@ -457,7 +470,10 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, phys_addr = rq_phys_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + if (err) + goto out_free_rsp; + cmd.req.arg[1] = MSD(phys_addr); cmd.req.arg[2] = LSD(phys_addr); cmd.req.arg[3] = rq_size; @@ -473,12 +489,13 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, err = -EIO; } + qlcnic_free_mbx_args(&cmd); + +out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, rsp_phys_addr); - out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); - qlcnic_free_mbx_args(&cmd); return err; } @@ -487,8 +504,11 @@ void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_args cmd; + int ret; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); + if (ret) + return; cmd.req.arg[1] = tx_ring->ctx_id; if (qlcnic_issue_cmd(adapter, &cmd)) @@ -503,7 +523,10 @@ qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) int err; struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); + if (err) + return err; + cmd.req.arg[1] = config; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); @@ -707,7 +730,10 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + if (err) + return err; + cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8; err = qlcnic_issue_cmd(adapter, &cmd); @@ -746,7 +772,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, nic_info = nic_info_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (err) + goto out_free_dma; + cmd.req.arg[1] = MSD(nic_dma_t); cmd.req.arg[2] = LSD(nic_dma_t); cmd.req.arg[3] = (func_id << 16 | nic_size); @@ -768,9 +797,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); } + qlcnic_free_mbx_args(&cmd); +out_free_dma: dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, nic_dma_t); - qlcnic_free_mbx_args(&cmd); return err; } @@ -807,7 +837,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + goto out_free_dma; + cmd.req.arg[1] = MSD(nic_dma_t); cmd.req.arg[2] = 
LSD(nic_dma_t); cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size); @@ -819,9 +852,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, err = -EIO; } - dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, - nic_dma_t); qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); return err; } @@ -845,7 +879,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, return -ENOMEM; npar = pci_info_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + if (err) + goto out_free_dma; + cmd.req.arg[1] = MSD(pci_info_dma_t); cmd.req.arg[2] = LSD(pci_info_dma_t); cmd.req.arg[3] = pci_size; @@ -873,20 +910,22 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, err = -EIO; } + qlcnic_free_mbx_args(&cmd); +out_free_dma: dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, pci_info_dma_t); - qlcnic_free_mbx_args(&cmd); return err; } /* Configure eSwitch for port mirroring */ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, - u8 enable_mirroring, u8 pci_func) + u8 enable_mirroring, u8 pci_func) { + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; int err = -EIO; u32 arg1; - struct qlcnic_cmd_args cmd; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) @@ -895,18 +934,20 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, arg1 = id | (enable_mirroring ? BIT_4 : 0); arg1 |= pci_func << 8; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_PORTMIRRORING); + if (err) + return err; + cmd.req.arg[1] = arg1; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) - dev_err(&adapter->pdev->dev, - "Failed to configure port mirroring%d on eswitch:%d\n", + dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n", pci_func, id); else - dev_info(&adapter->pdev->dev, - "Configured eSwitch %d for port mirroring:%d\n", - id, pci_func); + dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n", + pci_func, id); qlcnic_free_mbx_args(&cmd); return err; @@ -941,7 +982,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; arg1 |= rx_tx << 15 | stats_size << 16; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_STATS); + if (err) + goto out_free_dma; + cmd.req.arg[1] = arg1; cmd.req.arg[2] = MSD(stats_dma_t); cmd.req.arg[3] = LSD(stats_dma_t); @@ -963,9 +1008,10 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, esw_stats->numbytes = le64_to_cpu(stats->numbytes); } - dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, - stats_dma_t); qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + stats_dma_t); return err; } @@ -989,7 +1035,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, if (!stats_addr) return -ENOMEM; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); + if (err) + goto out_free_dma; + cmd.req.arg[1] = stats_size << 16; cmd.req.arg[2] = MSD(stats_dma_t); cmd.req.arg[3] = 
LSD(stats_dma_t); @@ -1020,11 +1069,12 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, "%s: Get mac stats failed, err=%d.\n", __func__, err); } - dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, - stats_dma_t); - qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + stats_dma_t); + return err; } @@ -1108,7 +1158,11 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; arg1 |= BIT_14 | rx_tx << 15; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_STATS); + if (err) + return err; + cmd.req.arg[1] = arg1; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); @@ -1121,17 +1175,19 @@ err_ret: return -EIO; } -static int -__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, - u32 *arg1, u32 *arg2) +static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, + u32 *arg1, u32 *arg2) { - int err = -EIO; + struct device *dev = &adapter->pdev->dev; struct qlcnic_cmd_args cmd; - u8 pci_func; - pci_func = (*arg1 >> 8); + u8 pci_func = *arg1 >> 8; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); + if (err) + return err; - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); cmd.req.arg[1] = *arg1; err = qlcnic_issue_cmd(adapter, &cmd); *arg1 = cmd.rsp.arg[1]; @@ -1139,12 +1195,11 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, qlcnic_free_mbx_args(&cmd); if (err == QLCNIC_RCODE_SUCCESS) - dev_info(&adapter->pdev->dev, - "eSwitch port config for pci func %d\n", pci_func); + dev_info(dev, "Get eSwitch port config for vNIC function %d\n", + pci_func); else - dev_err(&adapter->pdev->dev, - "Failed to get eswitch port config for pci func %d\n", - pci_func); + dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n", + pci_func); return err; } /* Configure eSwitch port @@ -1157,9 +1212,10 @@ op_type = 1 for port vlan_id int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; int err = -EIO, index; u32 arg1, arg2 = 0; - struct qlcnic_cmd_args cmd; u8 pci_func; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) @@ -1209,18 +1265,22 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, return err; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_ESWITCH); + if (err) + return err; + cmd.req.arg[1] = arg1; cmd.req.arg[2] = arg2; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); if (err != QLCNIC_RCODE_SUCCESS) - dev_err(&adapter->pdev->dev, - "Failed to configure eswitch pci func %d\n", pci_func); + dev_err(dev, "Failed to configure eswitch for vNIC function %d\n", + pci_func); else - dev_info(&adapter->pdev->dev, - "Configured eSwitch for pci func %d\n", pci_func); + dev_info(dev, "Configured eSwitch for vNIC function %d\n", + pci_func); return err; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index f67652de5a63..700a46324d09 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -846,7 +846,9 @@ static int qlcnic_irq_test(struct net_device 
*netdev) goto clear_diag_irq; ahw->diag_cnt = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) + goto free_diag_res; cmd.req.arg[1] = ahw->pci_func; ret = qlcnic_issue_cmd(adapter, &cmd); @@ -858,6 +860,8 @@ static int qlcnic_irq_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); + +free_diag_res: qlcnic_diag_free_res(netdev, max_sds_rings); clear_diag_irq: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h index c0f0c0d0a790..d262211b03b3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -672,6 +672,7 @@ enum { #define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10 #define QLCNIC_MAX_MC_COUNT 38 +#define QLCNIC_MAX_UC_COUNT 512 #define QLCNIC_WATCHDOG_TIMEOUTVALUE 5 #define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 106a12f2a02f..5b5d2edf125d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -499,6 +499,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan) void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) { struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; struct netdev_hw_addr *ha; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff @@ -515,25 +516,30 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) if (netdev->flags & IFF_PROMISC) { if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) mode = VPORT_MISS_MODE_ACCEPT_ALL; - goto send_fw_cmd; - } - - if ((netdev->flags & IFF_ALLMULTI) || - (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) { - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - goto send_fw_cmd; + } else if (netdev->flags & IFF_ALLMULTI) { + if (netdev_mc_count(netdev) > ahw->max_mc_count) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + } else if (!netdev_mc_empty(netdev) && + !qlcnic_sriov_vf_check(adapter)) { + netdev_for_each_mc_addr(ha, netdev) + qlcnic_nic_add_mac(adapter, ha->addr, + vlan); + } + if (mode != VPORT_MISS_MODE_ACCEPT_MULTI && + qlcnic_sriov_vf_check(adapter)) + qlcnic_vf_add_mc_list(netdev, vlan); } - if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) { - netdev_for_each_mc_addr(ha, netdev) { + /* configure unicast MAC address, if there is not sufficient space + * to store all the unicast addresses then enable promiscuous mode + */ + if (netdev_uc_count(netdev) > ahw->max_uc_count) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + } else if (!netdev_uc_empty(netdev)) { + netdev_for_each_uc_addr(ha, netdev) qlcnic_nic_add_mac(adapter, ha->addr, vlan); - } } - if (qlcnic_sriov_vf_check(adapter)) - qlcnic_vf_add_mc_list(netdev, vlan); - -send_fw_cmd: if (!qlcnic_sriov_vf_check(adapter)) { if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { @@ -780,7 +786,8 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) word = 0; if (enable) { word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK; - if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6) + if (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAP2_HW_LRO_IPV6) word |= QLCNIC_ENABLE_IPV6_LRO | QLCNIC_NO_DEST_IPV6_CHECK; } @@ -1503,6 +1510,21 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, 
u32 state, u32 rate) return rv; } +int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS); + if (!err) { + err = qlcnic_issue_cmd(adapter, &cmd); + if (!err) + *h_state = cmd.rsp.arg[1]; + } + qlcnic_free_mbx_args(&cmd); + return err; +} + void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) { void __iomem *msix_base_addr; @@ -1555,3 +1577,54 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter) { qlcnic_pcie_sem_unlock(adapter, 5); } + +int qlcnic_82xx_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (qlcnic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + return 0; +} + +int qlcnic_82xx_resume(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + err = qlcnic_start_firmware(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "failed to start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (!err) + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index b6818f4356b9..2c22504f57aa 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -86,7 +86,8 @@ enum qlcnic_regs { #define QLCNIC_CMD_BC_EVENT_SETUP 0x31 #define QLCNIC_CMD_CONFIG_VPORT 0x32 #define QLCNIC_CMD_GET_MAC_STATS 0x37 -#define QLCNIC_CMD_SET_DRV_VER 0x38 +#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38 +#define QLCNIC_CMD_GET_LED_STATUS 0x3C #define QLCNIC_CMD_CONFIGURE_RSS 0x41 #define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 #define QLCNIC_CMD_CONFIGURE_LED 0x44 @@ -102,6 +103,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_GET_LINK_STATUS 0x68 #define QLCNIC_CMD_SET_LED_CONFIG 0x69 #define QLCNIC_CMD_GET_LED_CONFIG 0x6A +#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F #define QLCNIC_CMD_ADD_RCV_RINGS 0x0B #define QLCNIC_INTRPT_INTX 1 @@ -197,4 +199,8 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *); void qlcnic_82xx_napi_enable(struct qlcnic_adapter *); void qlcnic_82xx_napi_disable(struct qlcnic_adapter *); void qlcnic_82xx_napi_del(struct qlcnic_adapter *); +int qlcnic_82xx_shutdown(struct pci_dev *); +int qlcnic_82xx_resume(struct qlcnic_adapter *); +void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed); +void qlcnic_fw_poll_work(struct work_struct *work); #endif /* __QLCNIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index aeb26a850679..4528f8ec333b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -52,10 +52,6 @@ int qlcnic_load_fw_file; MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); -int 
qlcnic_config_npars; -module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); - static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void qlcnic_remove(struct pci_dev *pdev); static int qlcnic_open(struct net_device *netdev); @@ -63,13 +59,11 @@ static int qlcnic_close(struct net_device *netdev); static void qlcnic_tx_timeout(struct net_device *netdev); static void qlcnic_attach_work(struct work_struct *work); static void qlcnic_fwinit_work(struct work_struct *work); -static void qlcnic_fw_poll_work(struct work_struct *work); #ifdef CONFIG_NET_POLL_CONTROLLER static void qlcnic_poll_controller(struct net_device *netdev); #endif static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); -static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8); static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); static irqreturn_t qlcnic_tmp_intr(int irq, void *data); @@ -364,12 +358,15 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], return ndo_dflt_fdb_del(ndm, tb, netdev, addr); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { - if (is_unicast_ether_addr(addr)) - err = qlcnic_nic_del_mac(adapter, addr); - else if (is_multicast_ether_addr(addr)) + if (is_unicast_ether_addr(addr)) { + err = dev_uc_del(netdev, addr); + if (!err) + err = qlcnic_nic_del_mac(adapter, addr); + } else if (is_multicast_ether_addr(addr)) { err = dev_mc_del(netdev, addr); - else + } else { err = -EINVAL; + } } return err; } @@ -392,12 +389,16 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (ether_addr_equal(addr, adapter->mac_addr)) return err; - if (is_unicast_ether_addr(addr)) - err = qlcnic_nic_add_mac(adapter, addr, 0); - else if (is_multicast_ether_addr(addr)) + if (is_unicast_ether_addr(addr)) { + if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count) + err = dev_uc_add_excl(netdev, addr); + else + err = -ENOMEM; + } else if (is_multicast_ether_addr(addr)) { err = dev_mc_add_excl(netdev, addr); - else + } else { err = -EINVAL; + } return err; } @@ -449,6 +450,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate, .ndo_get_vf_config = qlcnic_sriov_get_vf_config, .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk, #endif }; @@ -465,6 +467,8 @@ static struct qlcnic_nic_template qlcnic_ops = { .napi_add = qlcnic_82xx_napi_add, .napi_del = qlcnic_82xx_napi_del, .config_ipaddr = qlcnic_82xx_config_ipaddr, + .shutdown = qlcnic_82xx_shutdown, + .resume = qlcnic_82xx_resume, .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr, }; @@ -508,6 +512,7 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .config_promisc_mode = qlcnic_82xx_nic_set_promisc, .change_l2_filter = qlcnic_82xx_change_filter, .get_board_info = qlcnic_82xx_get_board_info, + .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, }; @@ -768,7 +773,7 @@ static int qlcnic_set_function_modes(struct qlcnic_adapter *adapter) { u8 id; - int i, ret = 1; + int ret; u32 data = QLCNIC_MGMT_FUNC; struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -776,20 +781,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter) if (ret) goto err_lock; - if (qlcnic_config_npars) { - for (i = 0; i < ahw->act_pci_func; i++) { - id = adapter->npars[i].pci_func; - if (id == ahw->pci_func) - continue; - data |= (qlcnic_config_npars 
& - QLC_DEV_SET_DRV(0xf, id)); - } - } else { - data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); - data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) | - (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, - ahw->pci_func)); - } + id = ahw->pci_func; + data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); + data = (data & ~QLC_DEV_SET_DRV(0xf, id)) | + QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id); QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data); qlcnic_api_unlock(adapter); err_lock: @@ -875,6 +870,27 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev, return 0; } +static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter, + int index) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned short subsystem_vendor; + bool ret = true; + + subsystem_vendor = pdev->subsystem_vendor; + + if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X || + pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { + if (qlcnic_boards[index].sub_vendor == subsystem_vendor && + qlcnic_boards[index].sub_device == pdev->subsystem_device) + ret = true; + else + ret = false; + } + + return ret; +} + static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) { struct pci_dev *pdev = adapter->pdev; @@ -882,20 +898,18 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { if (qlcnic_boards[i].vendor == pdev->vendor && - qlcnic_boards[i].device == pdev->device && - qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor && - qlcnic_boards[i].sub_device == pdev->subsystem_device) { - sprintf(name, "%pM: %s" , - adapter->mac_addr, - qlcnic_boards[i].short_name); - found = 1; - break; + qlcnic_boards[i].device == pdev->device && + qlcnic_validate_subsystem_id(adapter, i)) { + found = 1; + break; } - } if (!found) sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); + else + sprintf(name, "%pM: %s" , adapter->mac_addr, + qlcnic_boards[i].short_name); } static void @@ -980,7 +994,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { u32 temp; temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); - adapter->ahw->capabilities2 = temp; + adapter->ahw->extra_capability[0] = temp; } adapter->ahw->max_mac_filters = nic_info.max_mac_filters; adapter->ahw->max_mtu = nic_info.max_mtu; @@ -1395,16 +1409,23 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) for (ring = 0; ring < num_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_82xx_check(adapter) && - (ring == (num_sds_rings - 1))) - snprintf(sds_ring->name, - sizeof(sds_ring->name), - "qlcnic-%s[Tx0+Rx%d]", - netdev->name, ring); - else + (ring == (num_sds_rings - 1))) { + if (!(adapter->flags & + QLCNIC_MSIX_ENABLED)) + snprintf(sds_ring->name, + sizeof(sds_ring->name), + "qlcnic"); + else + snprintf(sds_ring->name, + sizeof(sds_ring->name), + "%s-tx-0-rx-%d", + netdev->name, ring); + } else { snprintf(sds_ring->name, sizeof(sds_ring->name), - "qlcnic-%s[Rx%d]", + "%s-rx-%d", netdev->name, ring); + } err = request_irq(sds_ring->irq, handler, flags, sds_ring->name, sds_ring); if (err) @@ -1419,7 +1440,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) ring++) { tx_ring = &adapter->tx_ring[ring]; snprintf(tx_ring->name, sizeof(tx_ring->name), - "qlcnic-%s[Tx%d]", netdev->name, ring); + "%s-tx-%d", netdev->name, ring); err = request_irq(tx_ring->irq, handler, flags, tx_ring->name, tx_ring); if (err) @@ -1465,7 +1486,7 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter 
*adapter) u32 capab = 0; if (qlcnic_82xx_check(adapter)) { - if (adapter->ahw->capabilities2 & + if (adapter->ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; } else { @@ -1816,6 +1837,22 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter) return err; } +void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u16 act_pci_fn = ahw->act_pci_func; + u16 count; + + ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; + if (act_pci_fn <= 2) + count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) / + act_pci_fn; + else + count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) / + act_pci_fn; + ahw->max_uc_count = count; +} + int qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, int pci_using_dac) @@ -1825,7 +1862,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, adapter->rx_csum = 1; adapter->ahw->mc_enabled = 0; - adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; + qlcnic_set_mac_filter_count(adapter); netdev->netdev_ops = &qlcnic_netdev_ops; netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ; @@ -1863,6 +1900,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->features |= NETIF_F_LRO; netdev->hw_features = netdev->features; + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; err = register_netdev(netdev); @@ -1947,6 +1985,21 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, return 0; } +void qlcnic_set_drv_version(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 fw_cmd = 0; + + if (qlcnic_82xx_check(adapter)) + fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER; + else if (qlcnic_83xx_check(adapter)) + fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER; + + if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) && + (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)) + qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); +} + static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1954,7 +2007,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct qlcnic_adapter *adapter = NULL; struct qlcnic_hardware_context *ahw; int err, pci_using_dac = -1; - u32 capab2; char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ if (pdev->is_virtfn) @@ -2109,13 +2161,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_disable_mbx_intr; - if (qlcnic_82xx_check(adapter)) { - if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { - capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); - if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB) - qlcnic_fw_cmd_set_drv_version(adapter); - } - } + qlcnic_set_drv_version(adapter); pci_set_drvdata(pdev, adapter); @@ -2231,37 +2277,6 @@ static void qlcnic_remove(struct pci_dev *pdev) kfree(ahw); free_netdev(netdev); } -static int __qlcnic_shutdown(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - int retval; - - netif_device_detach(netdev); - - qlcnic_cancel_idc_work(adapter); - - if (netif_running(netdev)) - qlcnic_down(adapter, netdev); - - qlcnic_sriov_cleanup(adapter); - if (qlcnic_82xx_check(adapter)) - qlcnic_clr_all_drv_state(adapter, 0); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - - retval = pci_save_state(pdev); - if (retval) - return retval; - if (qlcnic_82xx_check(adapter)) 
{ - if (qlcnic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - } - - return 0; -} static void qlcnic_shutdown(struct pci_dev *pdev) { @@ -2272,8 +2287,7 @@ static void qlcnic_shutdown(struct pci_dev *pdev) } #ifdef CONFIG_PM -static int -qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) +static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) { int retval; @@ -2285,11 +2299,9 @@ qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int -qlcnic_resume(struct pci_dev *pdev) +static int qlcnic_resume(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; int err; err = pci_enable_device(pdev); @@ -2300,23 +2312,7 @@ qlcnic_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); - err = qlcnic_start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "failed to start firmware\n"); - return err; - } - - if (netif_running(netdev)) { - err = qlcnic_up(adapter, netdev); - if (err) - goto done; - - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } -done: - netif_device_attach(netdev); - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); - return 0; + return __qlcnic_resume(adapter); } #endif @@ -2655,8 +2651,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) return 0; } -static void -qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) +void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) { u32 val; @@ -3086,6 +3081,7 @@ done: adapter->fw_fail_cnt = 0; adapter->flags &= ~QLCNIC_FW_HANG; clear_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_set_drv_version(adapter); if (!qlcnic_clr_drv_state(adapter)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, @@ -3166,8 +3162,7 @@ detach: return 1; } -static void -qlcnic_fw_poll_work(struct work_struct *work) +void qlcnic_fw_poll_work(struct work_struct *work) { struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, fw_work.work); @@ -3219,7 +3214,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev) if (err) return err; - pci_set_power_state(pdev, PCI_D0); pci_set_master(pdev); pci_restore_state(pdev); @@ -3517,7 +3511,7 @@ static int qlcnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct qlcnic_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); recheck: if (dev == NULL) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 4b9bab18ebd9..ab8a6744d402 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -15,6 +15,7 @@ #define QLC_83XX_MINIDUMP_FLASH 0x520000 #define QLC_83XX_OCM_INDEX 3 #define QLC_83XX_PCI_INDEX 0 +#define QLC_83XX_DMA_ENGINE_INDEX 8 static const u32 qlcnic_ms_read_data[] = { 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC @@ -32,6 +33,16 @@ static const u32 qlcnic_ms_read_data[] = { #define QLCNIC_DUMP_MASK_MAX 0xff +struct qlcnic_pex_dma_descriptor { + u32 read_data_size; + u32 dma_desc_cmd; + u32 src_addr_low; + u32 src_addr_high; + u32 dma_bus_addr_low; + u32 dma_bus_addr_high; + u32 rsvd[6]; +} __packed; + struct qlcnic_common_entry_hdr { u32 type; u32 offset; @@ -90,7 +101,10 @@ struct __ocm { } __packed; struct __mem { - u8 rsvd[24]; + u32 desc_card_addr; + u32 dma_desc_cmd; + u32 start_dma_cmd; + 
u32 rsvd[3]; u32 addr; u32 size; } __packed; @@ -466,12 +480,12 @@ skip_poll: return l2->no_ops * l2->read_addr_num * sizeof(u32); } -static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, __le32 *buffer) +static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, + struct __mem *mem, __le32 *buffer, + int *ret) { - u32 addr, data, test, ret = 0; + u32 addr, data, test; int i, reg_read; - struct __mem *mem = &entry->region.mem; reg_read = mem->size; addr = mem->addr; @@ -480,7 +494,8 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, dev_info(&adapter->pdev->dev, "Unaligned memory addr:0x%x size:0x%x\n", addr, reg_read); - return -EINVAL; + *ret = -EINVAL; + return 0; } mutex_lock(&adapter->ahw->mem_lock); @@ -499,7 +514,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, if (printk_ratelimit()) { dev_err(&adapter->pdev->dev, "failed to read through agent\n"); - ret = -EINVAL; + *ret = -EIO; goto out; } } @@ -516,6 +531,181 @@ out: return mem->size; } +/* DMA register base address */ +#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000)) + +/* DMA register offsets w.r.t base address */ +#define QLC_DMA_CMD_BUFF_ADDR_LOW 0 +#define QLC_DMA_CMD_BUFF_ADDR_HI 4 +#define QLC_DMA_CMD_STATUS_CTRL 8 + +#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16) + +static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, + struct __mem *mem) +{ + struct qlcnic_dump_template_hdr *tmpl_hdr; + struct device *dev = &adapter->pdev->dev; + u32 dma_no, dma_base_addr, temp_addr; + int i, ret, dma_sts; + + tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr; + dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; + dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no); + + temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW; + ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, + mem->desc_card_addr); + if (ret) + return ret; + + temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI; + ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0); + if (ret) + return ret; + + temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; + ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, + mem->start_dma_cmd); + if (ret) + return ret; + + /* Wait for DMA to complete */ + temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; + for (i = 0; i < 400; i++) { + dma_sts = qlcnic_ind_rd(adapter, temp_addr); + + if (dma_sts & BIT_1) + usleep_range(250, 500); + else + break; + } + + if (i >= 400) { + dev_info(dev, "PEX DMA operation timed out"); + ret = -EIO; + } + + return ret; +} + +static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter, + struct __mem *mem, + __le32 *buffer, int *ret) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 temp, dma_base_addr, size = 0, read_size = 0; + struct qlcnic_pex_dma_descriptor *dma_descr; + struct qlcnic_dump_template_hdr *tmpl_hdr; + struct device *dev = &adapter->pdev->dev; + dma_addr_t dma_phys_addr; + void *dma_buffer; + + tmpl_hdr = fw_dump->tmpl_hdr; + + /* Check if DMA engine is available */ + temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; + dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp); + temp = qlcnic_ind_rd(adapter, + dma_base_addr + QLC_DMA_CMD_STATUS_CTRL); + + if (!(temp & BIT_31)) { + dev_info(dev, "%s: DMA engine is not available\n", __func__); + *ret = -EIO; + return 0; + } + + /* Create DMA descriptor */ + dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor), + GFP_KERNEL); + if (!dma_descr) { + *ret = -ENOMEM; + return 0; + } + + /* 
dma_desc_cmd 0:15 = 0 + * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3 + * dma_desc_cmd 20:23 = pci function number + * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15 + */ + dma_phys_addr = fw_dump->phys_addr; + dma_buffer = fw_dump->dma_buffer; + temp = 0; + temp = mem->dma_desc_cmd & 0xff0f; + temp |= (adapter->ahw->pci_func & 0xf) << 4; + dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000; + dma_descr->dma_bus_addr_low = LSD(dma_phys_addr); + dma_descr->dma_bus_addr_high = MSD(dma_phys_addr); + dma_descr->src_addr_high = 0; + + /* Collect memory dump using multiple DMA operations if required */ + while (read_size < mem->size) { + if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE) + size = QLC_PEX_DMA_READ_SIZE; + else + size = mem->size - read_size; + + dma_descr->src_addr_low = mem->addr + read_size; + dma_descr->read_data_size = size; + + /* Write DMA descriptor to MS memory*/ + temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16; + *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr, + (u32 *)dma_descr, temp); + if (*ret) { + dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n", + mem->desc_card_addr); + goto free_dma_descr; + } + + *ret = qlcnic_start_pex_dma(adapter, mem); + if (*ret) { + dev_info(dev, "Failed to start PEX DMA operation\n"); + goto free_dma_descr; + } + + memcpy(buffer, dma_buffer, size); + buffer += size / 4; + read_size += size; + } + +free_dma_descr: + kfree(dma_descr); + + return read_size; +} + +static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct device *dev = &adapter->pdev->dev; + struct __mem *mem = &entry->region.mem; + u32 data_size; + int ret = 0; + + if (fw_dump->use_pex_dma) { + data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer, + &ret); + if (ret) + dev_info(dev, + "Failed to read memory dump using PEX DMA: mask[0x%x]\n", + entry->hdr.mask); + else + return data_size; + } + + data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret); + if (ret) { + dev_info(dev, + "Failed to read memory dump using test agent method: mask[0x%x]\n", + entry->hdr.mask); + return 0; + } else { + return data_size; + } +} + static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { @@ -893,6 +1083,12 @@ flash_temp: tmpl_hdr = ahw->fw_dump.tmpl_hdr; tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; + + if ((tmpl_hdr->version & 0xffffff) >= 0x20001) + ahw->fw_dump.use_pex_dma = true; + else + ahw->fw_dump.use_pex_dma = false; + ahw->fw_dump.enable = 1; return 0; @@ -910,7 +1106,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; static const struct qlcnic_dump_operations *fw_dump_ops; + struct device *dev = &adapter->pdev->dev; struct qlcnic_hardware_context *ahw; + void *temp_buffer; ahw = adapter->ahw; @@ -944,6 +1142,16 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; tmpl_hdr->sys_info[1] = adapter->fw_version; + if (fw_dump->use_pex_dma) { + temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE, + &fw_dump->phys_addr, + GFP_KERNEL); + if (!temp_buffer) + fw_dump->use_pex_dma = false; + else + fw_dump->dma_buffer = temp_buffer; + } + if (qlcnic_82xx_check(adapter)) { ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); fw_dump_ops = qlcnic_fw_dump_ops; @@ -1002,6 +1210,9 
@@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) return 0; } error: + if (fw_dump->use_pex_dma) + dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE, + fw_dump->dma_buffer, fw_dump->phys_addr); vfree(fw_dump->data); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index d85fbb57c25b..0daf660e12a1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -129,6 +129,7 @@ struct qlcnic_vport { u8 vlan_mode; u16 vlan; u8 qos; + bool spoofchk; u8 mac[6]; }; @@ -194,6 +195,8 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *, int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *, struct qlcnic_info *, u16); int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8); +int qlcnic_sriov_vf_shutdown(struct pci_dev *); +int qlcnic_sriov_vf_resume(struct qlcnic_adapter *); static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter) { @@ -225,6 +228,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int); int qlcnic_sriov_get_vf_config(struct net_device *, int , struct ifla_vf_info *); int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8); +int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool); #else static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {} static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 8b59a710a4a5..62380ce89905 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -35,6 +35,7 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *); +static void qlcnic_sriov_process_bc_cmd(struct work_struct *); static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { .read_crb = qlcnic_83xx_read_crb, @@ -75,6 +76,8 @@ static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work, .napi_add = qlcnic_83xx_napi_add, .napi_del = qlcnic_83xx_napi_del, + .shutdown = qlcnic_sriov_vf_shutdown, + .resume = qlcnic_sriov_vf_resume, .config_ipaddr = qlcnic_83xx_config_ipaddr, .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, }; @@ -179,6 +182,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) spin_lock_init(&vf->rcv_pend.lock); init_completion(&vf->ch_free_cmpl); + INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd); + if (qlcnic_sriov_pf_check(adapter)) { vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL); if (!vp) { @@ -187,6 +192,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) } sriov->vf_info[i].vp = vp; vp->max_tx_bw = MAX_BW; + vp->spoofchk = true; random_ether_addr(vp->mac); dev_info(&adapter->pdev->dev, "MAC Address %pM is configured for VF %d\n", @@ -652,6 +658,8 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (qlcnic_read_mac_addr(adapter)) dev_warn(&adapter->pdev->dev, "failed to read mac addr\n"); + INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); + clear_bit(__QLCNIC_RESETTING, &adapter->state); return 0; } @@ -864,7 +872,6 @@ static void 
qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov, vf->adapter->need_fw_reset) return; - INIT_WORK(&vf->trans_work, func); queue_work(sriov->bc.bc_trans_wq, &vf->trans_work); } @@ -1949,3 +1956,54 @@ static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter) kfree(cur); } } + +int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + qlcnic_sriov_cfg_bc_intr(adapter, 0); + qlcnic_83xx_disable_mbx_intr(adapter); + cancel_delayed_work_sync(&adapter->idc_aen_work); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + return 0; +} + +int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_idc *idc = &adapter->ahw->idc; + struct net_device *netdev = adapter->netdev; + int err; + + set_bit(QLC_83XX_MODULE_LOADED, &idc->status); + qlcnic_83xx_enable_mbx_intrpt(adapter); + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + return err; + + err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); + if (!err) { + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (!err) + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + } + + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, + idc->delay); + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 1a66ccded235..ee0c1d307966 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -580,6 +580,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func) struct qlcnic_cmd_args cmd; struct qlcnic_vport *vp; int err, id; + u8 *mac; id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) @@ -591,6 +592,14 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func) return err; cmd.req.arg[1] = 0x3 | func << 16; + if (vp->spoofchk == true) { + mac = vp->mac; + cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8; + cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 | + mac[2] << 24; + cmd.req.arg[5] = mac[1] | mac[0] << 8; + } + if (vp->vlan_mode == QLC_PVID_MODE) { cmd.req.arg[2] |= BIT_6; cmd.req.arg[3] |= vp->vlan << 8; @@ -1767,6 +1776,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev, memcpy(&ivi->mac, vp->mac, ETH_ALEN); ivi->vlan = vp->vlan; ivi->qos = vp->qos; + ivi->spoofchk = vp->spoofchk; if (vp->max_tx_bw == MAX_BW) ivi->tx_rate = 0; else @@ -1775,3 +1785,29 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev, ivi->vf = vf; return 0; } + +int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf_info; + struct qlcnic_vport *vp; + + if (!qlcnic_sriov_pf_check(adapter)) + return -EOPNOTSUPP; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + vf_info = &sriov->vf_info[vf]; + vp = vf_info->vp; + if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { + netdev_err(netdev, + "Spoof check change failed for VF %d, as VF driver is loaded. 
Please unload VF driver and retry the operation\n", + vf); + return -EOPNOTSUPP; + } + + vp->spoofchk = chk; + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index e7a2fe21b649..10ed82b3baca 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -47,7 +47,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev, if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) goto err_out; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) goto err_out; if (!qlcnic_config_bridged_mode(adapter, !!new)) @@ -77,7 +77,7 @@ static ssize_t qlcnic_store_diag_mode(struct device *dev, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); unsigned long new; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) return -EINVAL; if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) @@ -114,57 +114,51 @@ static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, return 0; } -static ssize_t qlcnic_store_beacon(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter, + const char *buf, size_t len) { - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_hardware_context *ahw = adapter->ahw; - int err, max_sds_rings = adapter->max_sds_rings; - u16 beacon; - u8 b_state, b_rate; unsigned long h_beacon; + int err; - if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { - dev_warn(dev, - "LED test not supported in non privileged mode\n"); - return -EOPNOTSUPP; - } + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; - if (qlcnic_83xx_check(adapter) && - !test_bit(__QLCNIC_RESETTING, &adapter->state)) { - if (kstrtoul(buf, 2, &h_beacon)) - return -EINVAL; + if (kstrtoul(buf, 2, &h_beacon)) + return -EINVAL; - if (ahw->beacon_state == h_beacon) - return len; + if (ahw->beacon_state == h_beacon) + return len; - rtnl_lock(); - if (!ahw->beacon_state) { - if (test_and_set_bit(__QLCNIC_LED_ENABLE, - &adapter->state)) { - rtnl_unlock(); - return -EBUSY; - } - } - if (h_beacon) { - err = qlcnic_83xx_config_led(adapter, 1, h_beacon); - if (err) - goto beacon_err; - } else { - err = qlcnic_83xx_config_led(adapter, 0, !h_beacon); - if (err) - goto beacon_err; + rtnl_lock(); + if (!ahw->beacon_state) { + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { + rtnl_unlock(); + return -EBUSY; } - /* set the current beacon state */ + } + + if (h_beacon) + err = qlcnic_83xx_config_led(adapter, 1, h_beacon); + else + err = qlcnic_83xx_config_led(adapter, 0, !h_beacon); + if (!err) ahw->beacon_state = h_beacon; -beacon_err: - if (!ahw->beacon_state) - clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); - rtnl_unlock(); - return len; - } + if (!ahw->beacon_state) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + rtnl_unlock(); + return len; +} + +static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, + const char *buf, size_t len) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err, max_sds_rings = adapter->max_sds_rings; + u16 beacon; + u8 h_beacon_state, b_state, b_rate; if (len != sizeof(u16)) return QL_STATUS_INVALID_PARAM; @@ -174,16 +168,29 @@ beacon_err: if (err) return err; - if (adapter->ahw->beacon_state == b_state) + if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { + err = qlcnic_get_beacon_state(adapter, &h_beacon_state); + if (err) { + 
dev_info(&adapter->pdev->dev, + "Failed to get current beacon state\n"); + } else { + if (h_beacon_state == QLCNIC_BEACON_DISABLE) + ahw->beacon_state = 0; + else if (h_beacon_state == QLCNIC_BEACON_EANBLE) + ahw->beacon_state = 2; + } + } + + if (ahw->beacon_state == b_state) return len; rtnl_lock(); - - if (!adapter->ahw->beacon_state) + if (!ahw->beacon_state) { if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { rtnl_unlock(); return -EBUSY; } + } if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { err = -EIO; @@ -206,14 +213,37 @@ beacon_err: if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(adapter->netdev, max_sds_rings); - out: - if (!adapter->ahw->beacon_state) +out: + if (!ahw->beacon_state) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); rtnl_unlock(); return err; } +static ssize_t qlcnic_store_beacon(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int err = 0; + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + dev_warn(dev, + "LED test not supported in non privileged mode\n"); + return -EOPNOTSUPP; + } + + if (qlcnic_82xx_check(adapter)) + err = qlcnic_82xx_store_beacon(adapter, buf, len); + else if (qlcnic_83xx_check(adapter)) + err = qlcnic_83xx_store_beacon(adapter, buf, len); + else + return -EIO; + + return err; +} + static ssize_t qlcnic_show_beacon(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index f87cc216045b..2553cf4503b9 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4946,15 +4946,4 @@ static struct pci_driver qlge_driver = { .err_handler = &qlge_err_handler }; -static int __init qlge_init_module(void) -{ - return pci_register_driver(&qlge_driver); -} - -static void __exit qlge_exit(void) -{ - pci_unregister_driver(&qlge_driver); -} - -module_init(qlge_init_module); -module_exit(qlge_exit); +module_pci_driver(qlge_driver); diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig index c8ba4b3494c1..2055f7eb2ba9 100644 --- a/drivers/net/ethernet/rdc/Kconfig +++ b/drivers/net/ethernet/rdc/Kconfig @@ -22,7 +22,6 @@ config R6040 tristate "RDC R6040 Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII select PHYLIB ---help--- diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 03523459c406..e6acb9fa5767 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1817,7 +1817,7 @@ static int cp_set_eeprom(struct net_device *dev, /* Put the board into D3cold state and wait for WakeUp signal */ static void cp_set_d3_state (struct cp_private *cp) { - pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */ + pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ pci_set_power_state (cp->pdev, PCI_D3hot); } diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 783fa8b5cde7..ae5d027096ed 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -37,7 +37,6 @@ config 8139CP tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -52,7 +51,6 @@ config 8139TOO 
tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -107,7 +105,6 @@ config R8169 depends on PCI select FW_LOADER select CRC32 - select NET_CORE select MII ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 393f961a013c..4106a743ca74 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -46,6 +46,7 @@ #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" #define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" #define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" @@ -144,6 +145,7 @@ enum mac_version { RTL_GIGA_MAC_VER_41, RTL_GIGA_MAC_VER_42, RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, RTL_GIGA_MAC_NONE = 0xff, }; @@ -276,6 +278,9 @@ static const struct { [RTL_GIGA_MAC_VER_43] = _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_44] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, + JUMBO_9K, false), }; #undef _R @@ -394,6 +399,7 @@ enum rtl8168_8101_registers { #define CSIAR_FUNC_CARD 0x00000000 #define CSIAR_FUNC_SDIO 0x00010000 #define CSIAR_FUNC_NIC 0x00020000 +#define CSIAR_FUNC_NIC2 0x00010000 PMCH = 0x6f, EPHYAR = 0x80, #define EPHYAR_FLAG 0x80000000 @@ -826,6 +832,7 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1); MODULE_FIRMWARE(FIRMWARE_8168F_2); MODULE_FIRMWARE(FIRMWARE_8402_1); MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); MODULE_FIRMWARE(FIRMWARE_8106E_1); MODULE_FIRMWARE(FIRMWARE_8106E_2); MODULE_FIRMWARE(FIRMWARE_8168G_2); @@ -2051,6 +2058,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, int mac_version; } mac_info[] = { /* 8168G family. 
*/ + { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, @@ -3651,6 +3659,7 @@ static void rtl_hw_phy_config(struct net_device *dev) break; case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: rtl8168g_2_hw_phy_config(tp); break; @@ -3863,6 +3872,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: ops->write = r8168g_mdio_write; ops->read = r8168g_mdio_read; break; @@ -3916,6 +3926,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -4178,6 +4189,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_44: ops->down = r8168_pll_power_down; ops->up = r8168_pll_power_up; break; @@ -4224,6 +4236,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: @@ -4384,6 +4397,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: default: ops->disable = NULL; ops->enable = NULL; @@ -4493,6 +4507,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_41 || tp->mac_version == RTL_GIGA_MAC_VER_42 || tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || tp->mac_version == RTL_GIGA_MAC_VER_38) { RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); @@ -4782,6 +4797,29 @@ static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) RTL_R32(CSIDR) : ~0; } +static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC2); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
+ RTL_R32(CSIDR) : ~0; +} + static void rtl_init_csi_ops(struct rtl8169_private *tp) { struct csi_ops *ops = &tp->csi_ops; @@ -4811,6 +4849,11 @@ static void rtl_init_csi_ops(struct rtl8169_private *tp) ops->read = r8402_csi_read; break; + case RTL_GIGA_MAC_VER_44: + ops->write = r8411_csi_write; + ops->read = r8411_csi_read; + break; + default: ops->write = r8169_csi_write; ops->read = r8169_csi_read; @@ -5255,6 +5298,25 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); } +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0020, 0x0000 }, + { 0x1e, 0x0000, 0x2000 } + }; + + rtl_hw_start_8168g_1(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); +} + static void rtl_hw_start_8168(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -5361,6 +5423,10 @@ static void rtl_hw_start_8168(struct net_device *dev) rtl_hw_start_8168g_2(tp); break; + case RTL_GIGA_MAC_VER_44: + rtl_hw_start_8411_2(tp); + break; + default: printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", dev->name, tp->mac_version); @@ -6877,6 +6943,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: rtl_hw_init_8168g(tp); break; diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index bed9841d728c..19a8a045e077 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -4,14 +4,8 @@ config SH_ETH tristate "Renesas SuperH Ethernet support" - depends on (SUPERH || ARCH_SHMOBILE) && \ - (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ - CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ - CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ - CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || \ - ARCH_R8A7778 || ARCH_R8A7779) + depends on HAS_DMA select CRC32 - select NET_CORE select MII select MDIO_BITBANG select PHYLIB diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e29fe8dbd226..a753928bab9c 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -313,9 +313,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_ADRL31] = 0x01fc, }; -#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_ARCH_R8A7740) +static int sh_eth_is_gether(struct sh_eth_private *mdp) +{ + if (mdp->reg_offset == sh_eth_offset_gigabit) + return 1; + else + return 0; +} + static void sh_eth_select_mii(struct net_device *ndev) { u32 value = 0x0; @@ -339,11 +344,7 @@ static void sh_eth_select_mii(struct net_device *ndev) sh_eth_write(ndev, value, RMII_MII); } -#endif -/* There is CPU dependent code */ -#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779) -#define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -354,7 +355,8 @@ static void sh_eth_set_duplex(struct net_device *ndev) sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); } -static void 
sh_eth_set_rate(struct net_device *ndev) +/* There is CPU dependent code */ +static void sh_eth_set_rate_r8a777x(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -371,9 +373,9 @@ static void sh_eth_set_rate(struct net_device *ndev) } /* R8A7778/9 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data r8a777x_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_r8a777x, .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, @@ -383,26 +385,14 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, .mpr = 1, .tpauser = 1, .hw_swap = 1, }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7724) -#define SH_ETH_RESET_DEFAULT 1 -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_sh7724(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -419,19 +409,18 @@ static void sh_eth_set_rate(struct net_device *ndev) } /* SH7724 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data sh7724_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_sh7724, .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, + .eesipr_value = 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, .mpr = 1, @@ -440,22 +429,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rpadir = 1, .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7757) -#define SH_ETH_HAS_BOTH_MODULES 1 -#define SH_ETH_HAS_TSU 1 -static int sh_eth_check_reset(struct net_device *ndev); - -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_sh7757(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -472,9 +447,9 @@ static void sh_eth_set_rate(struct net_device *ndev) } /* SH7757 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, +static struct sh_eth_cpu_data sh7757_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7757, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .rmcr_value = 0x00000001, @@ -483,8 +458,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE 
| EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, + .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@ -494,7 +469,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rpadir_value = 2 << 16, }; -#define SH_GIGA_ETH_BASE 0xfee00000 +#define SH_GIGA_ETH_BASE 0xfee00000UL #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) static void sh_eth_chip_reset_giga(struct net_device *ndev) @@ -519,52 +494,6 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev) } } -static int sh_eth_is_gether(struct sh_eth_private *mdp); -static int sh_eth_reset(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - int ret = 0; - - if (sh_eth_is_gether(mdp)) { - sh_eth_write(ndev, 0x03, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, - EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - } else { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, - EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, - EDMR); - } - -out: - return ret; -} - -static void sh_eth_set_duplex_giga(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - static void sh_eth_set_rate_giga(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -585,9 +514,9 @@ static void sh_eth_set_rate_giga(struct net_device *ndev) } /* SH7757(GETHERC) */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { +static struct sh_eth_cpu_data sh7757_data_giga = { .chip_reset = sh_eth_chip_reset_giga, - .set_duplex = sh_eth_set_duplex_giga, + .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_giga, .ecsr_value = ECSR_ICD | ECSR_MPD, @@ -598,11 +527,10 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ - EESR_TFE, .fdr_value = 0x0000072f, .rmcr_value = 0x00000001, + .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@ -615,19 +543,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .tsu = 1, }; -static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) -{ - if (sh_eth_is_gether(mdp)) - return &sh_eth_my_cpu_data_giga; - else - return &sh_eth_my_cpu_data; -} - -#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) -#define SH_ETH_HAS_TSU 1 -static int sh_eth_check_reset(struct net_device *ndev); -static void sh_eth_reset_hw_crc(struct net_device *ndev); - static void sh_eth_chip_reset(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -637,17 +552,7 @@ static void sh_eth_chip_reset(struct net_device *ndev) mdelay(1); } -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, 
sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_gether(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -666,11 +571,11 @@ static void sh_eth_set_rate(struct net_device *ndev) } } -/* sh7763 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +/* SH7734 */ +static struct sh_eth_cpu_data sh7734_data = { .chip_reset = sh_eth_chip_reset, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether, .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@ -680,8 +585,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ - EESR_TFE, .apr = 1, .mpr = 1, @@ -691,54 +594,37 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .no_trimd = 1, .no_ade = 1, .tsu = 1, -#if defined(CONFIG_CPU_SUBTYPE_SH7734) - .hw_crc = 1, - .select_mii = 1, -#endif + .hw_crc = 1, + .select_mii = 1, }; -static int sh_eth_reset(struct net_device *ndev) -{ - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; +/* SH7763 */ +static struct sh_eth_cpu_data sh7763_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - - /* Reset HW CRC register */ - sh_eth_reset_hw_crc(ndev); - - /* Select MII mode */ - if (sh_eth_my_cpu_data.select_mii) - sh_eth_select_mii(ndev); -out: - return ret; -} + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, -static void sh_eth_reset_hw_crc(struct net_device *ndev) -{ - if (sh_eth_my_cpu_data.hw_crc) - sh_eth_write(ndev, 0x0, CSMR); -} + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, -#elif defined(CONFIG_ARCH_R8A7740) -#define SH_ETH_HAS_TSU 1 -static int sh_eth_check_reset(struct net_device *ndev); + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .irq_flags = IRQF_SHARED, +}; -static void sh_eth_chip_reset(struct net_device *ndev) +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -749,65 +635,11 @@ static void sh_eth_chip_reset(struct net_device *ndev) sh_eth_select_mii(ndev); } -static int sh_eth_reset(struct net_device *ndev) -{ - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - 
sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - -out: - return ret; -} - -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - -static void sh_eth_set_rate(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - switch (mdp->speed) { - case 10: /* 10BASE */ - sh_eth_write(ndev, GECMR_10, GECMR); - break; - case 100:/* 100BASE */ - sh_eth_write(ndev, GECMR_100, GECMR); - break; - case 1000: /* 1000BASE */ - sh_eth_write(ndev, GECMR_1000, GECMR); - break; - default: - break; - } -} - /* R8A7740 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .chip_reset = sh_eth_chip_reset, +static struct sh_eth_cpu_data r8a7740_data = { + .chip_reset = sh_eth_chip_reset_r8a7740, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether, .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@ -817,8 +649,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ - EESR_TFE, .apr = 1, .mpr = 1, @@ -829,11 +659,10 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .no_ade = 1, .tsu = 1, .select_mii = 1, + .shift_rd0 = 1, }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -#define SH_ETH_RESET_DEFAULT 1 -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data sh7619_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .apr = 1, @@ -841,14 +670,11 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .tpauser = 1, .hw_swap = 1, }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -#define SH_ETH_RESET_DEFAULT 1 -#define SH_ETH_HAS_TSU 1 -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + +static struct sh_eth_cpu_data sh771x_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tsu = 1, }; -#endif static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) { @@ -873,22 +699,8 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) if (!cd->eesr_err_check) cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; - - if (!cd->tx_error_check) - cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; } -#if defined(SH_ETH_RESET_DEFAULT) -/* Chip Reset */ -static int sh_eth_reset(struct net_device *ndev) -{ - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); - - return 0; -} -#else static int sh_eth_check_reset(struct net_device *ndev) { int ret = 0; @@ -906,7 +718,49 @@ static int sh_eth_check_reset(struct net_device *ndev) } return ret; } -#endif + +static int sh_eth_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret = 0; + + if (sh_eth_is_gether(mdp)) { + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, + EDMR); + + ret = sh_eth_check_reset(ndev); + if (ret) + goto out; + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + 
sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); + + /* Reset HW CRC register */ + if (mdp->cd->hw_crc) + sh_eth_write(ndev, 0x0, CSMR); + + /* Select MII mode */ + if (mdp->cd->select_mii) + sh_eth_select_mii(ndev); + } else { + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, + EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, + EDMR); + } + +out: + return ret; +} #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) static void sh_eth_set_receive_align(struct sk_buff *skb) @@ -982,14 +836,6 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac) } } -static int sh_eth_is_gether(struct sh_eth_private *mdp) -{ - if (mdp->reg_offset == sh_eth_offset_gigabit) - return 1; - else - return 0; -} - static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) { if (sh_eth_is_gether(mdp)) @@ -1388,7 +1234,7 @@ static int sh_eth_txfree(struct net_device *ndev) } /* Packet receive function */ -static int sh_eth_rx(struct net_device *ndev, u32 intr_status) +static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) { struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_rxdesc *rxdesc; @@ -1396,6 +1242,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) int entry = mdp->cur_rx % mdp->num_rx_ring; int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; struct sk_buff *skb; + int exceeded = 0; u16 pkt_len = 0; u32 desc_status; @@ -1407,10 +1254,15 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) if (--boguscnt < 0) break; + if (*quota <= 0) { + exceeded = 1; + break; + } + (*quota)--; + if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; -#if defined(CONFIG_ARCH_R8A7740) /* * In case of almost all GETHER/ETHERs, the Receive Frame State * (RFS) bits in the Receive Descriptor 0 are from bit 9 to @@ -1418,8 +1270,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) * bits are from bit 25 to bit 16. So, the driver needs right * shifting by 16. */ - desc_status >>= 16; -#endif + if (mdp->cd->shift_rd0) + desc_status >>= 16; if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { @@ -1494,7 +1346,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) sh_eth_write(ndev, EDRRR_R, EDRRR); } - return 0; + return exceeded; } static void sh_eth_rcv_snd_disable(struct net_device *ndev) @@ -1636,7 +1488,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_cpu_data *cd = mdp->cd; irqreturn_t ret = IRQ_NONE; - unsigned long intr_status; + unsigned long intr_status, intr_enable; spin_lock(&mdp->lock); @@ -1647,34 +1499,41 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) * and we need to fully handle it in sh_eth_error() in order to quench * it as it doesn't get cleared by just writing 1 to the ECI bit... 
*/ - intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; - /* Clear interrupt */ - if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | - EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | - cd->tx_check | cd->eesr_err_check)) { - sh_eth_write(ndev, intr_status, EESR); + intr_enable = sh_eth_read(ndev, EESIPR); + intr_status &= intr_enable | DMAC_M_ECI; + if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) ret = IRQ_HANDLED; - } else + else goto other_irq; - if (intr_status & (EESR_FRC | /* Frame recv*/ - EESR_RMAF | /* Multi cast address recv*/ - EESR_RRF | /* Bit frame recv */ - EESR_RTLF | /* Long frame recv*/ - EESR_RTSF | /* short frame recv */ - EESR_PRE | /* PHY-LSI recv error */ - EESR_CERF)){ /* recv frame CRC error */ - sh_eth_rx(ndev, intr_status); + if (intr_status & EESR_RX_CHECK) { + if (napi_schedule_prep(&mdp->napi)) { + /* Mask Rx interrupts */ + sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK, + EESIPR); + __napi_schedule(&mdp->napi); + } else { + dev_warn(&ndev->dev, + "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", + intr_status, intr_enable); + } } /* Tx Check */ if (intr_status & cd->tx_check) { + /* Clear Tx interrupts */ + sh_eth_write(ndev, intr_status & cd->tx_check, EESR); + sh_eth_txfree(ndev); netif_wake_queue(ndev); } - if (intr_status & cd->eesr_err_check) + if (intr_status & cd->eesr_err_check) { + /* Clear error interrupts */ + sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); + sh_eth_error(ndev, intr_status); + } other_irq: spin_unlock(&mdp->lock); @@ -1682,6 +1541,33 @@ other_irq: return ret; } +static int sh_eth_poll(struct napi_struct *napi, int budget) +{ + struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, + napi); + struct net_device *ndev = napi->dev; + int quota = budget; + unsigned long intr_status; + + for (;;) { + intr_status = sh_eth_read(ndev, EESR); + if (!(intr_status & EESR_RX_CHECK)) + break; + /* Clear Rx interrupts */ + sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR); + + if (sh_eth_rx(ndev, intr_status, &quota)) + goto out; + } + + napi_complete(napi); + + /* Reenable Rx interrupts */ + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); +out: + return budget - quota; +} + /* PHY state control function */ static void sh_eth_adjust_link(struct net_device *ndev) { @@ -1972,14 +1858,7 @@ static int sh_eth_open(struct net_device *ndev) pm_runtime_get_sync(&mdp->pdev->dev); ret = request_irq(ndev->irq, sh_eth_interrupt, -#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7764) || \ - defined(CONFIG_CPU_SUBTYPE_SH7757) - IRQF_SHARED, -#else - 0, -#endif - ndev->name, ndev); + mdp->cd->irq_flags, ndev->name, ndev); if (ret) { dev_err(&ndev->dev, "Can not assign IRQ number\n"); return ret; @@ -2000,6 +1879,8 @@ static int sh_eth_open(struct net_device *ndev) if (ret) goto out_free_irq; + napi_enable(&mdp->napi); + return ret; out_free_irq: @@ -2095,6 +1976,8 @@ static int sh_eth_close(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + napi_disable(&mdp->napi); + netif_stop_queue(ndev); /* Disable interrupts by clearing the interrupt mask. */ @@ -2165,7 +2048,6 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, return phy_mii_ioctl(phydev, rq, cmd); } -#if defined(SH_ETH_HAS_TSU) /* For TSU_POSTn. 
Please refer to the manual about this (strange) bitfields */ static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, int entry) @@ -2508,7 +2390,6 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, return 0; } -#endif /* SH_ETH_HAS_TSU */ /* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) @@ -2652,11 +2533,21 @@ static const struct net_device_ops sh_eth_netdev_ops = { .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, -#if defined(SH_ETH_HAS_TSU) + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static const struct net_device_ops sh_eth_netdev_ops_tsu = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, .ndo_set_rx_mode = sh_eth_set_multicast_list, .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, -#endif .ndo_tx_timeout = sh_eth_tx_timeout, .ndo_do_ioctl = sh_eth_do_ioctl, .ndo_validate_addr = eth_validate_addr, @@ -2671,6 +2562,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) struct net_device *ndev = NULL; struct sh_eth_private *mdp = NULL; struct sh_eth_plat_data *pd = pdev->dev.platform_data; + const struct platform_device_id *id = platform_get_device_id(pdev); /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2729,15 +2621,14 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); /* set cpu data */ -#if defined(SH_ETH_HAS_BOTH_MODULES) - mdp->cd = sh_eth_get_cpu_data(mdp); -#else - mdp->cd = &sh_eth_my_cpu_data; -#endif + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; sh_eth_set_default_cpu_data(mdp->cd); /* set function */ - ndev->netdev_ops = &sh_eth_netdev_ops; + if (mdp->cd->tsu) + ndev->netdev_ops = &sh_eth_netdev_ops_tsu; + else + ndev->netdev_ops = &sh_eth_netdev_ops; SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); ndev->watchdog_timeo = TX_TIMEOUT; @@ -2776,10 +2667,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev) } } + netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); + /* network device register */ ret = register_netdev(ndev); if (ret) - goto out_release; + goto out_napi_del; /* mdio bus init */ ret = sh_mdio_init(ndev, pdev->id, pd); @@ -2797,6 +2690,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) out_unregister: unregister_netdev(ndev); +out_napi_del: + netif_napi_del(&mdp->napi); + out_release: /* net_dev free */ if (ndev) @@ -2809,16 +2705,18 @@ out: static int sh_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_eth_private *mdp = netdev_priv(ndev); sh_mdio_release(ndev); unregister_netdev(ndev); + netif_napi_del(&mdp->napi); pm_runtime_disable(&pdev->dev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return 0; } +#ifdef CONFIG_PM static int sh_eth_runtime_nop(struct device *dev) { /* @@ -2832,17 +2730,36 @@ static int sh_eth_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops sh_eth_dev_pm_ops = { +static const struct dev_pm_ops sh_eth_dev_pm_ops = { .runtime_suspend = sh_eth_runtime_nop, .runtime_resume = sh_eth_runtime_nop, }; +#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) +#else +#define SH_ETH_PM_OPS NULL +#endif + +static struct 
platform_device_id sh_eth_id_table[] = { + { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, + { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, + { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, + { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, + { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, + { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, + { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, + { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_eth_id_table); static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, .remove = sh_eth_drv_remove, + .id_table = sh_eth_id_table, .driver = { .name = CARDNAME, - .pm = &sh_eth_dev_pm_ops, + .pm = SH_ETH_PM_OPS, }, }; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 62689a5823be..99995bf38c40 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -166,19 +166,16 @@ enum { /* * Register's bits */ -#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\ - defined(CONFIG_ARCH_R8A7740) -/* EDSR */ +/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */ enum EDSR_BIT { EDSR_ENT = 0x01, EDSR_ENR = 0x02, }; #define EDSR_ENALL (EDSR_ENT|EDSR_ENR) -/* GECMR */ +/* GECMR : sh7734, sh7763 and r8a7740 only */ enum GECMR_BIT { GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01, }; -#endif /* EDMR */ enum DMAC_M_BIT { @@ -251,13 +248,19 @@ enum EESR_BIT { EESR_CERF = 0x00000001, }; +#define EESR_RX_CHECK (EESR_FRC | /* Frame recv */ \ + EESR_RMAF | /* Multicast address recv */ \ + EESR_RRF | /* Bit frame recv */ \ + EESR_RTLF | /* Long frame recv */ \ + EESR_RTSF | /* Short frame recv */ \ + EESR_PRE | /* PHY-LSI recv error */ \ + EESR_CERF) /* Recv frame CRC error */ + #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ EESR_RTO) #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE | EESR_ECI) -#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ - EESR_TFE) /* EESIPR */ enum DMAC_IM_BIT { @@ -299,11 +302,11 @@ enum FCFTR_BIT { #define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) #define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) -/* Transfer descriptor bit */ +/* Transmit descriptor bit */ enum TD_STS_BIT { - TD_TACT = 0x80000000, - TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000, - TD_TFP0 = 0x10000000, + TD_TACT = 0x80000000, TD_TDLE = 0x40000000, + TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, + TD_TFE = 0x08000000, TD_TWBI = 0x04000000, }; #define TDF1ST TD_TFP1 #define TDFEND TD_TFP0 @@ -463,9 +466,9 @@ struct sh_eth_cpu_data { /* interrupt checking mask */ unsigned long tx_check; unsigned long eesr_err_check; - unsigned long tx_error_check; /* hardware features */ + unsigned long irq_flags; /* IRQ configuration flags */ unsigned no_psr:1; /* EtherC DO NOT have PSR */ unsigned apr:1; /* EtherC have APR */ unsigned mpr:1; /* EtherC have MPR */ @@ -478,6 +481,7 @@ struct sh_eth_cpu_data { unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ unsigned hw_crc:1; /* E-DMAC have CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ + unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ }; struct sh_eth_private { @@ -499,6 +503,7 @@ struct sh_eth_private { u32 cur_tx, dirty_tx; u32 rx_buf_sz; /* Based on MTU+slack. 
*/ int edmac_endian; + struct napi_struct napi; /* MII transceiver section. */ u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c index b6739afeaca1..a99739c5142c 100644 --- a/drivers/net/ethernet/s6gmac.c +++ b/drivers/net/ethernet/s6gmac.c @@ -1040,7 +1040,6 @@ static int s6gmac_remove(struct platform_device *pdev) unregister_netdev(dev); free_irq(dev->irq, dev); free_netdev(dev); - platform_set_drvdata(pdev, NULL); } return 0; } diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 0ad5694b41f8..856e523ac936 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -818,7 +818,6 @@ static int __exit sgiseeq_remove(struct platform_device *pdev) dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, sp->srings_dma); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4a14a940c65e..c72968840f1a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -21,8 +21,8 @@ #include <linux/ethtool.h> #include <linux/topology.h> #include <linux/gfp.h> -#include <linux/cpu_rmap.h> #include <linux/aer.h> +#include <linux/interrupt.h> #include "net_driver.h" #include "efx.h" #include "nic.h" @@ -1283,29 +1283,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) return count; } -static int -efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) -{ -#ifdef CONFIG_RFS_ACCEL - unsigned int i; - int rc; - - efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); - if (!efx->net_dev->rx_cpu_rmap) - return -ENOMEM; - for (i = 0; i < efx->n_rx_channels; i++) { - rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, - xentries[i].vector); - if (rc) { - free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); - efx->net_dev->rx_cpu_rmap = NULL; - return rc; - } - } -#endif - return 0; -} - /* Probe the number and type of interrupts we are able to obtain, and * the resulting numbers of channels and RX queues. */ @@ -1359,11 +1336,6 @@ static int efx_probe_interrupts(struct efx_nic *efx) efx->n_tx_channels = n_channels; efx->n_rx_channels = n_channels; } - rc = efx_init_rx_cpu_rmap(efx, xentries); - if (rc) { - pci_disable_msix(efx->pci_dev); - return rc; - } for (i = 0; i < efx->n_channels; i++) efx_get_channel(efx, i)->irq = xentries[i].vector; @@ -1427,6 +1399,10 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq) BUG_ON(efx->state == STATE_DISABLED); + if (efx->eeh_disabled_legacy_irq) { + enable_irq(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = false; + } if (efx->legacy_irq) efx->legacy_irq_enabled = true; efx_nic_enable_interrupts(efx); @@ -2120,7 +2096,7 @@ static void efx_update_name(struct efx_nic *efx) static int efx_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *net_dev = ptr; + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); if (net_dev->netdev_ops == &efx_netdev_ops && event == NETDEV_CHANGENAME) @@ -2365,7 +2341,7 @@ out: * Returns 0 if the recovery mechanisms are unsuccessful. * Returns a non-zero value otherwise. 
*/ -static int efx_try_recovery(struct efx_nic *efx) +int efx_try_recovery(struct efx_nic *efx) { #ifdef CONFIG_EEH /* A PCI error can occur and not be seen by EEH because nothing @@ -2603,10 +2579,6 @@ static void efx_pci_remove_main(struct efx_nic *efx) BUG_ON(efx->state == STATE_READY); cancel_work_sync(&efx->reset_work); -#ifdef CONFIG_RFS_ACCEL - free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); - efx->net_dev->rx_cpu_rmap = NULL; -#endif efx_stop_interrupts(efx, false); efx_nic_fini_interrupt(efx); efx_fini_port(efx); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 8372da239b43..bdb30bbb0c97 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -124,6 +124,7 @@ extern const struct ethtool_ops efx_ethtool_ops; extern int efx_reset(struct efx_nic *efx, enum reset_type method); extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +extern int efx_try_recovery(struct efx_nic *efx); /* Global */ extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 6e768175e7e0..1fc21458413d 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1114,6 +1114,20 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, return 0; } +int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Software capabilities */ + ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE); + ts_info->phc_index = -1; + + efx_ptp_get_ts_info(efx, ts_info); + return 0; +} + static int efx_ethtool_get_module_eeprom(struct net_device *net_dev, struct ethtool_eeprom *ee, u8 *data) @@ -1176,7 +1190,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, - .get_ts_info = efx_ptp_get_ts_info, + .get_ts_info = efx_ethtool_get_ts_info, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, }; diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 2397f0e8d3eb..b74a60ab9ac7 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -1185,8 +1185,21 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, nhoff = skb_network_offset(skb); - if (skb->protocol != htons(ETH_P_IP)) + if (skb->protocol == htons(ETH_P_8021Q)) { + EFX_BUG_ON_PARANOID(skb_headlen(skb) < + nhoff + sizeof(struct vlan_hdr)); + if (((const struct vlan_hdr *)(skb->data + nhoff))-> + h_vlan_encapsulated_proto != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + + /* This is IP over 802.1q VLAN. We can't filter on the + * IP 5-tuple and the vlan together, so just strip the + * vlan header and filter on the IP part. 
+ */ + nhoff += sizeof(struct vlan_hdr); + } else if (skb->protocol != htons(ETH_P_IP)) { return -EPROTONOSUPPORT; + } /* RFS must validate the IP header length before calling us */ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 39d6bd77f015..f4c7e6b67743 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -243,6 +243,7 @@ struct efx_rx_buffer { #define EFX_RX_BUF_LAST_IN_PAGE 0x0001 #define EFX_RX_PKT_CSUMMED 0x0002 #define EFX_RX_PKT_DISCARD 0x0004 +#define EFX_RX_PKT_TCP 0x0040 /** * struct efx_rx_page_state - Page-based rx buffer state @@ -784,9 +785,11 @@ struct efx_nic { char name[IFNAMSIZ]; struct pci_dev *pci_dev; + unsigned int port_num; const struct efx_nic_type *type; int legacy_irq; bool legacy_irq_enabled; + bool eeh_disabled_legacy_irq; struct workqueue_struct *workqueue; char workqueue_name[16]; struct work_struct reset_work; @@ -916,7 +919,7 @@ static inline int efx_dev_registered(struct efx_nic *efx) static inline unsigned int efx_port_num(struct efx_nic *efx) { - return efx->net_dev->dev_id; + return efx->port_num; } /** diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index b0503cd8c2a0..56ed3bc71e00 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -14,6 +14,7 @@ #include <linux/pci.h> #include <linux/module.h> #include <linux/seq_file.h> +#include <linux/cpu_rmap.h> #include "net_driver.h" #include "bitfield.h" #include "efx.h" @@ -1080,12 +1081,21 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); if (likely(rx_ev_pkt_ok)) { - /* If packet is marked as OK and packet type is TCP/IP or - * UDP/IP, then we can rely on the hardware checksum. + /* If packet is marked as OK then we can rely on the + * hardware checksum and classification. */ - flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || - rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ? - EFX_RX_PKT_CSUMMED : 0; + flags = 0; + switch (rx_ev_hdr_type) { + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: + flags |= EFX_RX_PKT_TCP; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: + flags |= EFX_RX_PKT_CSUMMED; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: + case FSE_AZ_RX_EV_HDR_TYPE_OTHER: + break; + } } else { flags = efx_handle_rx_not_ok(rx_queue, event); } @@ -1579,6 +1589,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) efx_readd(efx, &reg, FR_BZ_INT_ISR0); queues = EFX_EXTRACT_DWORD(reg, 0, 31); + /* Legacy interrupts are disabled too late by the EEH kernel + * code. Disable them earlier. + * If an EEH error occurred, the read will have returned all ones. 
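+	 * (disable_irq_nosync() rather than disable_irq(): the sync variant waits for running handlers to finish and so must not be called from within this handler itself.)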
+ */ + if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && + !efx->eeh_disabled_legacy_irq) { + disable_irq_nosync(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = true; + } + /* Handle non-event-queue sources */ if (queues & (1U << efx->irq_level)) { syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); @@ -1687,6 +1707,7 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx) int efx_nic_init_interrupt(struct efx_nic *efx) { struct efx_channel *channel; + unsigned int n_irqs; int rc; if (!EFX_INT_MODE_USE_MSI(efx)) { @@ -1707,7 +1728,19 @@ int efx_nic_init_interrupt(struct efx_nic *efx) return 0; } +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + efx->net_dev->rx_cpu_rmap = + alloc_irq_cpu_rmap(efx->n_rx_channels); + if (!efx->net_dev->rx_cpu_rmap) { + rc = -ENOMEM; + goto fail1; + } + } +#endif + /* Hook MSI or MSI-X interrupt */ + n_irqs = 0; efx_for_each_channel(channel, efx) { rc = request_irq(channel->irq, efx_msi_interrupt, IRQF_PROBE_SHARED, /* Not shared */ @@ -1718,13 +1751,31 @@ int efx_nic_init_interrupt(struct efx_nic *efx) "failed to hook IRQ %d\n", channel->irq); goto fail2; } + ++n_irqs; + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX && + channel->channel < efx->n_rx_channels) { + rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, + channel->irq); + if (rc) + goto fail2; + } +#endif } return 0; fail2: - efx_for_each_channel(channel, efx) +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + efx_for_each_channel(channel, efx) { + if (n_irqs-- == 0) + break; free_irq(channel->irq, &efx->channel[channel->channel]); + } fail1: return rc; } @@ -1734,11 +1785,14 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) struct efx_channel *channel; efx_oword_t reg; +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + /* Disable MSI/MSI-X interrupts */ - efx_for_each_channel(channel, efx) { - if (channel->irq) - free_irq(channel->irq, &efx->channel[channel->channel]); - } + efx_for_each_channel(channel, efx) + free_irq(channel->irq, &efx->channel[channel->channel]); /* ACK legacy interrupt */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 1b0003323498..d63c2991a751 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -254,8 +254,8 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, struct ethtool_ts_info; extern void efx_ptp_probe(struct efx_nic *efx); extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); -extern int efx_ptp_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *ts_info); +extern void efx_ptp_get_ts_info(struct efx_nic *efx, + struct ethtool_ts_info *ts_info); extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 9a95abf2dedf..b495394a6dfa 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1203,18 +1203,16 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) return 0; } -int -efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) +void efx_ptp_get_ts_info(struct efx_nic *efx, struct 
ethtool_ts_info *ts_info) { - struct efx_nic *efx = netdev_priv(net_dev); struct efx_ptp_data *ptp = efx->ptp_data; if (!ptp) - return -EOPNOTSUPP; + return; - ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE); + ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); ts_info->phc_index = ptp_clock_index(ptp->phc_clock); ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE | @@ -1224,7 +1222,6 @@ efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); - return 0; } int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index a7dfe36cabf4..6af9cfda50fb 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -36,7 +36,7 @@ #define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH) /* Size of buffer allocated for skb header area. */ -#define EFX_SKB_HEADERS 64u +#define EFX_SKB_HEADERS 128u /* This is the percentage fill level below which new RX descriptors * will be added to the RX descriptor ring. @@ -282,9 +282,9 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, } /* Recycle the pages that are used by buffers that have just been received. */ -static void efx_recycle_rx_buffers(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - unsigned int n_frags) +static void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) { struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); @@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel, } while (--n_frags); } +static void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + efx_recycle_rx_pages(channel, rx_buf, n_frags); + + do { + efx_free_rx_buffer(rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); +} + /** * efx_fast_push_rx_descriptors - push new RX descriptors quickly * @rx_queue: RX descriptor queue @@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, */ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { efx_rx_flush_packet(channel); - put_page(rx_buf->page); - efx_recycle_rx_buffers(channel, rx_buf, n_frags); + efx_discard_rx_packet(channel, rx_buf, n_frags); return; } @@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); } - /* All fragments have been DMA-synced, so recycle buffers and pages. */ + /* All fragments have been DMA-synced, so recycle pages. */ rx_buf = efx_rx_buffer(rx_queue, index); - efx_recycle_rx_buffers(channel, rx_buf, n_frags); + efx_recycle_rx_pages(channel, rx_buf, n_frags); /* Pipeline receives so that we give time for packet headers to be * prefetched into cache. 
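
The EFX_RX_PKT_TCP flag added in net_driver.h is what ties the sfc nic.c and rx.c hunks of this series together: the event parser tags TCP packets while checking the hardware checksum result, and the delivery path then reserves GRO for exactly those packets. A condensed sketch of the two ends of that path, using only names from the surrounding hunks:

	/* nic.c, efx_handle_rx_event(): classify while validating the checksum */
	switch (rx_ev_hdr_type) {
	case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
		flags |= EFX_RX_PKT_TCP;
		/* fall through */
	case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
		flags |= EFX_RX_PKT_CSUMMED;
		break;
	}

	/* rx.c, __efx_rx_packet(): GRO only pays off for TCP; anything else
	 * (or a channel with its own receive_skb handler) takes the plain
	 * skb path, which now sets CHECKSUM_UNNECESSARY from the same flags.
	 */
	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
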
@@ -598,6 +611,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, /* Set the SKB flags */ skb_checksum_none_assert(skb); + if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) + skb->ip_summed = CHECKSUM_UNNECESSARY; if (channel->type->receive_skb) if (channel->type->receive_skb(channel, skb)) @@ -627,7 +642,7 @@ void __efx_rx_packet(struct efx_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if (!channel->type->receive_skb) + if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); @@ -675,7 +690,7 @@ static void efx_init_rx_recycle_ring(struct efx_nic *efx, #ifdef CONFIG_PPC64 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; #else - if (efx->pci_dev->dev.iommu_group) + if (iommu_present(&pci_bus_type)) bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; else bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 51669244d154..8c91775e3c5f 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -304,7 +304,7 @@ static int siena_probe_nic(struct efx_nic *efx) } efx_reado(efx, ®, FR_AZ_CS_DEBUG); - efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; + efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; efx_mcdi_init(efx); diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig index c1c4bb868a3b..e832f46660c9 100644 --- a/drivers/net/ethernet/sgi/Kconfig +++ b/drivers/net/ethernet/sgi/Kconfig @@ -22,7 +22,6 @@ config SGI_IOC3_ETH bool "SGI IOC3 Ethernet" depends on PCI && SGI_IP27 select CRC32 - select NET_CORE select MII ---help--- If you have a network (Ethernet) card of this type, say Y and read diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 7ed08c32a9c5..ffa78432164d 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1398,16 +1398,6 @@ static struct pci_driver ioc3_driver = { .remove = ioc3_remove_one, }; -static int __init ioc3_init_module(void) -{ - return pci_register_driver(&ioc3_driver); -} - -static void __exit ioc3_cleanup_module(void) -{ - pci_unregister_driver(&ioc3_driver); -} - static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long data; @@ -1677,9 +1667,7 @@ static void ioc3_set_multicast_list(struct net_device *dev) netif_wake_queue(dev); /* Let us get going again. 
*/ } +module_pci_driver(ioc3_driver); MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); MODULE_LICENSE("GPL"); - -module_init(ioc3_init_module); -module_exit(ioc3_cleanup_module); diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 4bdbaad9932d..9f5f35e041ac 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -863,7 +863,6 @@ static int __exit meth_remove(struct platform_device *pdev) unregister_netdev(dev); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index 28f7268f1b88..5eb933c97bba 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -1578,19 +1578,7 @@ static struct pci_driver sc92031_pci_driver = { .resume = sc92031_resume, }; -static int __init sc92031_init(void) -{ - return pci_register_driver(&sc92031_pci_driver); -} - -static void __exit sc92031_exit(void) -{ - pci_unregister_driver(&sc92031_pci_driver); -} - -module_init(sc92031_init); -module_exit(sc92031_exit); - +module_pci_driver(sc92031_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig index f1135cc1bd48..68d052b09af1 100644 --- a/drivers/net/ethernet/sis/Kconfig +++ b/drivers/net/ethernet/sis/Kconfig @@ -22,7 +22,6 @@ config SIS900 tristate "SiS 900/7016 PCI Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -39,7 +38,6 @@ config SIS190 tristate "SiS190/SiS191 gigabit ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 9a9c379420d1..02df0894690d 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1934,15 +1934,4 @@ static struct pci_driver sis190_pci_driver = { .remove = sis190_remove_one, }; -static int __init sis190_init_module(void) -{ - return pci_register_driver(&sis190_pci_driver); -} - -static void __exit sis190_cleanup_module(void) -{ - pci_unregister_driver(&sis190_pci_driver); -} - -module_init(sis190_init_module); -module_exit(sis190_cleanup_module); +module_pci_driver(sis190_pci_driver); diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index bb4c1674ff99..068fc44d37e1 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -37,7 +37,6 @@ config SMC9194 config SMC91X tristate "SMC 91C9x/91C1xxx support" select CRC32 - select NET_CORE select MII depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ MN10300 || COLDFIRE || ARM64) @@ -57,7 +56,6 @@ config PCMCIA_SMC91C92 tristate "SMC 91Cxx PCMCIA support" depends on PCMCIA select CRC32 - select NET_CORE select MII ---help--- Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA @@ -70,7 +68,6 @@ config EPIC100 tristate "SMC EtherPower II" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC, @@ -81,7 +78,6 @@ config EPIC100 config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 - select NET_CORE select 
MII depends on (ARM || SUPERH || MN10300) ---help--- @@ -97,9 +93,8 @@ config SMC911X config SMSC911X tristate "SMSC LAN911x/LAN921x families embedded ethernet support" - depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300) + depends on HAS_IOMEM select CRC32 - select NET_CORE select MII select PHYLIB ---help--- diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 9dd842dbb859..345558fe7367 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -2087,7 +2087,6 @@ static int smc911x_drv_probe(struct platform_device *pdev) ndev->base_addr = res->start; ret = smc911x_probe(ndev); if (ret != 0) { - platform_set_drvdata(pdev, NULL); iounmap(addr); release_both: free_netdev(ndev); @@ -2113,7 +2112,6 @@ static int smc911x_drv_remove(struct platform_device *pdev) struct resource *res; DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); - platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index dfbf978315df..cde13be7c7de 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2299,7 +2299,6 @@ static int smc_drv_probe(struct platform_device *pdev) return 0; out_iounmap: - platform_set_drvdata(pdev, NULL); iounmap(addr); out_release_attrib: smc_release_attrib(pdev, ndev); @@ -2319,8 +2318,6 @@ static int smc_drv_remove(struct platform_device *pdev) struct smc_local *lp = netdev_priv(ndev); struct resource *res; - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); free_irq(ndev->irq, ndev); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 3663b9e04a31..a1419211585b 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2284,7 +2284,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev) mdiobus_unregister(pdata->mii_bus); mdiobus_free(pdata->mii_bus); - platform_set_drvdata(pdev, NULL); unregister_netdev(dev); free_irq(dev->irq, dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -2539,7 +2538,6 @@ out_disable_resources: out_enable_resources_fail: smsc911x_free_resources(pdev); out_request_resources_fail: - platform_set_drvdata(pdev, NULL); iounmap(pdata->ioaddr); free_netdev(dev); out_release_io_1: diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 43c1f3223322..6e52c0f74cd9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,7 +1,6 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" depends on HAS_IOMEM && HAS_DMA - select NET_CORE select MII select PHYLIB select CRC32 diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 95176979b2d2..7eb8babed2cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -38,16 +38,6 @@ #include "descs.h" #include "mmc.h" -#undef CHIP_DEBUG_PRINT -/* Turn-on extra printk debug for MAC core, dma and descriptors */ -/* #define CHIP_DEBUG_PRINT */ - -#ifdef CHIP_DEBUG_PRINT -#define CHIP_DBG(fmt, args...) printk(fmt, ## args) -#else -#define CHIP_DBG(fmt, args...) 
do { } while (0) -#endif - /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 7e05e8d0f1c2..cdd926832e27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -91,8 +91,8 @@ static void dwmac1000_set_filter(struct net_device *dev, int id) unsigned int value = 0; unsigned int perfect_addr_number; - CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", - __func__, netdev_mc_count(dev), netdev_uc_count(dev)); + pr_debug("%s: # mcasts %d, # unicast %d\n", __func__, + netdev_mc_count(dev), netdev_uc_count(dev)); if (dev->flags & IFF_PROMISC) value = GMAC_FRAME_FILTER_PR; @@ -152,7 +152,7 @@ static void dwmac1000_set_filter(struct net_device *dev, int id) #endif writel(value, ioaddr + GMAC_FRAME_FILTER); - CHIP_DBG(KERN_INFO "\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n", + pr_debug("\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); } @@ -162,18 +162,18 @@ static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, { unsigned int flow = 0; - CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n"); + pr_debug("GMAC Flow-Control:\n"); if (fc & FLOW_RX) { - CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); + pr_debug("\tReceive Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_RFE; } if (fc & FLOW_TX) { - CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); + pr_debug("\tTransmit Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_TFE; } if (duplex) { - CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time); + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); } @@ -185,11 +185,11 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode) unsigned int pmt = 0; if (mode & WAKE_MAGIC) { - CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); + pr_debug("GMAC: WOL Magic frame\n"); pmt |= power_down | magic_pkt_en; } if (mode & WAKE_UCAST) { - CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); + pr_debug("GMAC: WOL on global unicast\n"); pmt |= global_unicast; } @@ -203,23 +203,13 @@ static int dwmac1000_irq_status(void __iomem *ioaddr, int ret = 0; /* Not used events (e.g. MMC interrupts) are not handled. 
*/ - if ((intr_status & mmc_tx_irq)) { - CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n", - readl(ioaddr + GMAC_MMC_TX_INTR)); + if ((intr_status & mmc_tx_irq)) x->mmc_tx_irq_n++; - } - if (unlikely(intr_status & mmc_rx_irq)) { - CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n", - readl(ioaddr + GMAC_MMC_RX_INTR)); + if (unlikely(intr_status & mmc_rx_irq)) x->mmc_rx_irq_n++; - } - if (unlikely(intr_status & mmc_rx_csum_offload_irq)) { - CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n", - readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) x->mmc_rx_csum_offload_irq_n++; - } if (unlikely(intr_status & pmt_irq)) { - CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n"); /* clear the PMT bits 5 and 6 by reading the PMT status reg */ readl(ioaddr + GMAC_PMT); x->irq_receive_pmt_irq_n++; @@ -229,32 +219,22 @@ static int dwmac1000_irq_status(void __iomem *ioaddr, /* Clean LPI interrupt by reading the Reg 12 */ ret = readl(ioaddr + LPI_CTRL_STATUS); - if (ret & LPI_CTRL_STATUS_TLPIEN) { - CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n"); + if (ret & LPI_CTRL_STATUS_TLPIEN) x->irq_tx_path_in_lpi_mode_n++; - } - if (ret & LPI_CTRL_STATUS_TLPIEX) { - CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n"); + if (ret & LPI_CTRL_STATUS_TLPIEX) x->irq_tx_path_exit_lpi_mode_n++; - } - if (ret & LPI_CTRL_STATUS_RLPIEN) { - CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n"); + if (ret & LPI_CTRL_STATUS_RLPIEN) x->irq_rx_path_in_lpi_mode_n++; - } - if (ret & LPI_CTRL_STATUS_RLPIEX) { - CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n"); + if (ret & LPI_CTRL_STATUS_RLPIEX) x->irq_rx_path_exit_lpi_mode_n++; - } } if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { - CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n"); readl(ioaddr + GMAC_AN_STATUS); x->irq_pcs_ane_n++; } if (intr_status & rgmii_irq) { u32 status = readl(ioaddr + GMAC_S_R_GMII); - CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n"); x->irq_rgmii_n++; /* Save and dump the link status. */ @@ -271,11 +251,12 @@ static int dwmac1000_irq_status(void __iomem *ioaddr, x->pcs_speed = SPEED_10; x->pcs_link = 1; - pr_debug("Link is Up - %d/%s\n", (int)x->pcs_speed, + pr_debug("%s: Link is Up - %d/%s\n", __func__, + (int)x->pcs_speed, x->pcs_duplex ? "Full" : "Half"); } else { x->pcs_link = 0; - pr_debug("Link is Down\n"); + pr_debug("%s: Link is Down\n", __func__); } } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 2c431b616058..0c2058a69fd2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -116,7 +116,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, u32 csr6 = readl(ioaddr + DMA_CONTROL); if (txmode == SF_DMA_MODE) { - CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n"); + pr_debug("GMAC: enable TX store and forward mode\n"); /* Transmit COE type 2 cannot be done in cut-through mode. 
*/ csr6 |= DMA_CONTROL_TSF; /* Operating on second frame increase the performance @@ -124,8 +124,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, */ csr6 |= DMA_CONTROL_OSF; } else { - CHIP_DBG(KERN_DEBUG "GMAC: disabling TX SF (threshold %d)\n", - txmode); + pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); csr6 &= ~DMA_CONTROL_TSF; csr6 &= DMA_CONTROL_TC_TX_MASK; /* Set the transmit threshold */ @@ -142,11 +141,10 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, } if (rxmode == SF_DMA_MODE) { - CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n"); + pr_debug("GMAC: enable RX store and forward mode\n"); csr6 |= DMA_CONTROL_RSF; } else { - CHIP_DBG(KERN_DEBUG "GMAC: disable RX SF mode (threshold %d)\n", - rxmode); + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); csr6 &= ~DMA_CONTROL_RSF; csr6 &= DMA_CONTROL_TC_RX_MASK; if (rxmode <= 32) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 007bb2be3f10..5857d677dac1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -135,10 +135,6 @@ static void dwmac100_set_filter(struct net_device *dev, int id) } writel(value, ioaddr + MAC_CONTROL); - - CHIP_DBG(KERN_INFO "%s: Filter: 0x%08x Hash: HI 0x%08x, LO 0x%08x\n", - __func__, readl(ioaddr + MAC_CONTROL), - readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); } static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 67551c154138..7d1dce9e7ffc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -90,14 +90,14 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr) { int i; - CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n"); + pr_debug("DWMAC 100 DMA CSR\n"); for (i = 0; i < 9; i++) pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, (DMA_BUS_MODE + i * 4), readl(ioaddr + DMA_BUS_MODE + i * 4)); - CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", - DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); - CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", + + pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n", + DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR), DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 491d7e930603..484e3cf9c414 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -24,13 +24,6 @@ #include "common.h" #include "dwmac_dma.h" -#undef DWMAC_DMA_DEBUG -#ifdef DWMAC_DMA_DEBUG -#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args) -#else -#define DWMAC_LIB_DBG(fmt, args...) 
do { } while (0) -#endif - #define GMAC_HI_REG_AE 0x80000000 /* CSR1 enables the transmit DMA to check for new descriptor */ @@ -85,24 +78,24 @@ static void show_tx_process_state(unsigned int status) switch (state) { case 0: - pr_info("- TX (Stopped): Reset or Stop command\n"); + pr_debug("- TX (Stopped): Reset or Stop command\n"); break; case 1: - pr_info("- TX (Running):Fetching the Tx desc\n"); + pr_debug("- TX (Running):Fetching the Tx desc\n"); break; case 2: - pr_info("- TX (Running): Waiting for end of tx\n"); + pr_debug("- TX (Running): Waiting for end of tx\n"); break; case 3: - pr_info("- TX (Running): Reading the data " + pr_debug("- TX (Running): Reading the data " "and queuing the data into the Tx buf\n"); break; case 6: - pr_info("- TX (Suspended): Tx Buff Underflow " + pr_debug("- TX (Suspended): Tx Buff Underflow " "or an unavailable Transmit descriptor\n"); break; case 7: - pr_info("- TX (Running): Closing Tx descriptor\n"); + pr_debug("- TX (Running): Closing Tx descriptor\n"); break; default: break; @@ -116,29 +109,29 @@ static void show_rx_process_state(unsigned int status) switch (state) { case 0: - pr_info("- RX (Stopped): Reset or Stop command\n"); + pr_debug("- RX (Stopped): Reset or Stop command\n"); break; case 1: - pr_info("- RX (Running): Fetching the Rx desc\n"); + pr_debug("- RX (Running): Fetching the Rx desc\n"); break; case 2: - pr_info("- RX (Running):Checking for end of pkt\n"); + pr_debug("- RX (Running):Checking for end of pkt\n"); break; case 3: - pr_info("- RX (Running): Waiting for Rx pkt\n"); + pr_debug("- RX (Running): Waiting for Rx pkt\n"); break; case 4: - pr_info("- RX (Suspended): Unavailable Rx buf\n"); + pr_debug("- RX (Suspended): Unavailable Rx buf\n"); break; case 5: - pr_info("- RX (Running): Closing Rx descriptor\n"); + pr_debug("- RX (Running): Closing Rx descriptor\n"); break; case 6: - pr_info("- RX(Running): Flushing the current frame" + pr_debug("- RX(Running): Flushing the current frame" " from the Rx buf\n"); break; case 7: - pr_info("- RX (Running): Queuing the Rx frame" + pr_debug("- RX (Running): Queuing the Rx frame" " from the Rx buf into memory\n"); break; default: @@ -154,51 +147,37 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); - DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status); #ifdef DWMAC_DMA_DEBUG - /* It displays the DMA process states (CSR5 register) */ + /* Enable it to monitor DMA rx/tx status in case of critical problems */ + pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status); show_tx_process_state(intr_status); show_rx_process_state(intr_status); #endif /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_STATUS_AIS)) { - DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: "); if (unlikely(intr_status & DMA_STATUS_UNF)) { - DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n"); ret = tx_hard_error_bump_tc; x->tx_undeflow_irq++; } - if (unlikely(intr_status & DMA_STATUS_TJT)) { - DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n"); + if (unlikely(intr_status & DMA_STATUS_TJT)) x->tx_jabber_irq++; - } - if (unlikely(intr_status & DMA_STATUS_OVF)) { - DWMAC_LIB_DBG(KERN_INFO "recv overflow\n"); + + if (unlikely(intr_status & DMA_STATUS_OVF)) x->rx_overflow_irq++; - } - if (unlikely(intr_status & DMA_STATUS_RU)) { - DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n"); + + if (unlikely(intr_status & DMA_STATUS_RU)) x->rx_buf_unav_irq++; - } - if (unlikely(intr_status & DMA_STATUS_RPS)) { - 
DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n"); + if (unlikely(intr_status & DMA_STATUS_RPS)) x->rx_process_stopped_irq++; - } - if (unlikely(intr_status & DMA_STATUS_RWT)) { - DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n"); + if (unlikely(intr_status & DMA_STATUS_RWT)) x->rx_watchdog_irq++; - } - if (unlikely(intr_status & DMA_STATUS_ETI)) { - DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n"); + if (unlikely(intr_status & DMA_STATUS_ETI)) x->tx_early_irq++; - } if (unlikely(intr_status & DMA_STATUS_TPS)) { - DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n"); x->tx_process_stopped_irq++; ret = tx_hard_error; } if (unlikely(intr_status & DMA_STATUS_FBI)) { - DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n"); x->fatal_bus_error_irq++; ret = tx_hard_error; } @@ -224,12 +203,11 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, /* Optional hardware blocks, interrupts should be disabled */ if (unlikely(intr_status & (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) - pr_info("%s: unexpected status %08x\n", __func__, intr_status); + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); - DWMAC_LIB_DBG(KERN_INFO "\n\n"); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 0fbc8fafa706..7e6628a91514 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -33,54 +33,40 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.etx.error_summary)) { - CHIP_DBG(KERN_ERR "GMAC TX error... 
0x%08x\n", p->des01.etx); - if (unlikely(p->des01.etx.jabber_timeout)) { - CHIP_DBG(KERN_ERR "\tjabber_timeout error\n"); + if (unlikely(p->des01.etx.jabber_timeout)) x->tx_jabber++; - } if (unlikely(p->des01.etx.frame_flushed)) { - CHIP_DBG(KERN_ERR "\tframe_flushed error\n"); x->tx_frame_flushed++; dwmac_dma_flush_tx_fifo(ioaddr); } if (unlikely(p->des01.etx.loss_carrier)) { - CHIP_DBG(KERN_ERR "\tloss_carrier error\n"); x->tx_losscarrier++; stats->tx_carrier_errors++; } if (unlikely(p->des01.etx.no_carrier)) { - CHIP_DBG(KERN_ERR "\tno_carrier error\n"); x->tx_carrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.etx.late_collision)) { - CHIP_DBG(KERN_ERR "\tlate_collision error\n"); + if (unlikely(p->des01.etx.late_collision)) stats->collisions += p->des01.etx.collision_count; - } - if (unlikely(p->des01.etx.excessive_collisions)) { - CHIP_DBG(KERN_ERR "\texcessive_collisions\n"); + + if (unlikely(p->des01.etx.excessive_collisions)) stats->collisions += p->des01.etx.collision_count; - } - if (unlikely(p->des01.etx.excessive_deferral)) { - CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n"); + + if (unlikely(p->des01.etx.excessive_deferral)) x->tx_deferred++; - } if (unlikely(p->des01.etx.underflow_error)) { - CHIP_DBG(KERN_ERR "\tunderflow error\n"); dwmac_dma_flush_tx_fifo(ioaddr); x->tx_underflow++; } - if (unlikely(p->des01.etx.ip_header_error)) { - CHIP_DBG(KERN_ERR "\tTX IP header csum error\n"); + if (unlikely(p->des01.etx.ip_header_error)) x->tx_ip_header_error++; - } if (unlikely(p->des01.etx.payload_error)) { - CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n"); x->tx_payload_error++; dwmac_dma_flush_tx_fifo(ioaddr); } @@ -88,15 +74,12 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, ret = -1; } - if (unlikely(p->des01.etx.deferred)) { - CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n"); + if (unlikely(p->des01.etx.deferred)) x->tx_deferred++; - } + #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.etx.vlan_frame) { - CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); + if (p->des01.etx.vlan_frame) x->tx_vlan++; - } #endif return ret; @@ -123,30 +106,20 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) * 0 1 1 | COE bypassed.. no IPv4/6 frame * 0 1 0 | Reserved. 
*/ - if (status == 0x0) { - CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); + if (status == 0x0) ret = llc_snap; - } else if (status == 0x4) { - CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); + else if (status == 0x4) ret = good_frame; - } else if (status == 0x5) { - CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); + else if (status == 0x5) ret = csum_none; - } else if (status == 0x6) { - CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); + else if (status == 0x6) ret = csum_none; - } else if (status == 0x7) { - CHIP_DBG(KERN_ERR - "RX Des0 status: IPv4/6 Header and Payload Error.\n"); + else if (status == 0x7) ret = csum_none; - } else if (status == 0x1) { - CHIP_DBG(KERN_ERR - "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); + else if (status == 0x1) ret = discard_frame; - } else if (status == 0x3) { - CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); + else if (status == 0x3) ret = discard_frame; - } return ret; } @@ -208,36 +181,26 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.erx.error_summary)) { - CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n", - p->des01.erx); if (unlikely(p->des01.erx.descriptor_error)) { - CHIP_DBG(KERN_ERR "\tdescriptor error\n"); x->rx_desc++; stats->rx_length_errors++; } - if (unlikely(p->des01.erx.overflow_error)) { - CHIP_DBG(KERN_ERR "\toverflow error\n"); + if (unlikely(p->des01.erx.overflow_error)) x->rx_gmac_overflow++; - } if (unlikely(p->des01.erx.ipc_csum_error)) - CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); + pr_err("\tIPC Csum Error/Giant frame\n"); if (unlikely(p->des01.erx.late_collision)) { - CHIP_DBG(KERN_ERR "\tlate_collision error\n"); - stats->collisions++; stats->collisions++; } - if (unlikely(p->des01.erx.receive_watchdog)) { - CHIP_DBG(KERN_ERR "\treceive_watchdog error\n"); + if (unlikely(p->des01.erx.receive_watchdog)) x->rx_watchdog++; - } - if (unlikely(p->des01.erx.error_gmii)) { - CHIP_DBG(KERN_ERR "\tReceive Error\n"); + + if (unlikely(p->des01.erx.error_gmii)) x->rx_mii++; - } + if (unlikely(p->des01.erx.crc_error)) { - CHIP_DBG(KERN_ERR "\tCRC error\n"); x->rx_crc++; stats->rx_crc_errors++; } @@ -251,30 +214,24 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); - if (unlikely(p->des01.erx.dribbling)) { - CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); + if (unlikely(p->des01.erx.dribbling)) x->dribbling_bit++; - } + if (unlikely(p->des01.erx.sa_filter_fail)) { - CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); x->sa_rx_filter_fail++; ret = discard_frame; } if (unlikely(p->des01.erx.da_filter_fail)) { - CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n"); x->da_rx_filter_fail++; ret = discard_frame; } if (unlikely(p->des01.erx.length_error)) { - CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n"); x->rx_length++; ret = discard_frame; } #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.erx.vlan_tag) { - CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); + if (p->des01.erx.vlan_tag) x->rx_vlan++; - } #endif return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 11775b99afc5..35ad4f427ae2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -52,10 +52,8 
@@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, ret = -1; } - if (p->des01.etx.vlan_frame) { - CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); + if (p->des01.etx.vlan_frame) x->tx_vlan++; - } if (unlikely(p->des01.tx.deferred)) x->tx_deferred++; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e9eab29db7be..f2ccb36e8685 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -51,32 +51,6 @@ #include "stmmac_ptp.h" #include "stmmac.h" -#undef STMMAC_DEBUG -/*#define STMMAC_DEBUG*/ -#ifdef STMMAC_DEBUG -#define DBG(nlevel, klevel, fmt, args...) \ - ((void)(netif_msg_##nlevel(priv) && \ - printk(KERN_##klevel fmt, ## args))) -#else -#define DBG(nlevel, klevel, fmt, args...) do { } while (0) -#endif - -#undef STMMAC_RX_DEBUG -/*#define STMMAC_RX_DEBUG*/ -#ifdef STMMAC_RX_DEBUG -#define RX_DBG(fmt, args...) printk(fmt, ## args) -#else -#define RX_DBG(fmt, args...) do { } while (0) -#endif - -#undef STMMAC_XMIT_DEBUG -/*#define STMMAC_XMIT_DEBUG*/ -#ifdef STMMAC_XMIT_DEBUG -#define TX_DBG(fmt, args...) printk(fmt, ## args) -#else -#define TX_DBG(fmt, args...) do { } while (0) -#endif - #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) #define JUMBO_LEN 9000 @@ -214,19 +188,17 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) } } -#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) static void print_pkt(unsigned char *buf, int len) { int j; - pr_info("len = %d byte, buf addr: 0x%p", len, buf); + pr_debug("len = %d byte, buf addr: 0x%p", len, buf); for (j = 0; j < len; j++) { if ((j % 16) == 0) - pr_info("\n %03x:", j); - pr_info(" %02x", buf[j]); + pr_debug("\n %03x:", j); + pr_debug(" %02x", buf[j]); } - pr_info("\n"); + pr_debug("\n"); } -#endif /* minimum number of free TX descriptors required to wake up TX process */ #define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) @@ -696,9 +668,6 @@ static void stmmac_adjust_link(struct net_device *dev) if (phydev == NULL) return; - DBG(probe, DEBUG, "stmmac_adjust_link: called. 
address %d link %d\n", - phydev->addr, phydev->link); - spin_lock_irqsave(&priv->lock, flags); if (phydev->link) { @@ -773,8 +742,6 @@ static void stmmac_adjust_link(struct net_device *dev) priv->eee_enabled = stmmac_eee_init(priv); spin_unlock_irqrestore(&priv->lock, flags); - - DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); } /** @@ -789,13 +756,13 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) int interface = priv->plat->interface; if (priv->dma_cap.pcs) { - if ((interface & PHY_INTERFACE_MODE_RGMII) || - (interface & PHY_INTERFACE_MODE_RGMII_ID) || - (interface & PHY_INTERFACE_MODE_RGMII_RXID) || - (interface & PHY_INTERFACE_MODE_RGMII_TXID)) { + if ((interface == PHY_INTERFACE_MODE_RGMII) || + (interface == PHY_INTERFACE_MODE_RGMII_ID) || + (interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { pr_debug("STMMAC: PCS RGMII support enable\n"); priv->pcs = STMMAC_PCS_RGMII; - } else if (interface & PHY_INTERFACE_MODE_SGMII) { + } else if (interface == PHY_INTERFACE_MODE_SGMII) { pr_debug("STMMAC: PCS SGMII support enable\n"); priv->pcs = STMMAC_PCS_SGMII; } @@ -1015,8 +982,9 @@ static void init_dma_desc_rings(struct net_device *dev) if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); - DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", - txsize, rxsize, bfsize); + if (netif_msg_probe(priv)) + pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__, + txsize, rxsize, bfsize); if (priv->extend_desc) { priv->dma_erx = dma_alloc_coherent(priv->device, rxsize * @@ -1052,12 +1020,13 @@ static void init_dma_desc_rings(struct net_device *dev) GFP_KERNEL); priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), GFP_KERNEL); - if (netif_msg_drv(priv)) + if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); - /* RX INITIALIZATION */ - DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n"); + /* RX INITIALIZATION */ + pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); + } for (i = 0; i < rxsize; i++) { struct dma_desc *p; if (priv->extend_desc) @@ -1068,8 +1037,10 @@ static void init_dma_desc_rings(struct net_device *dev) if (stmmac_init_rx_buffers(priv, p, i)) break; - DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], - priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); + if (netif_msg_probe(priv)) + pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], + priv->rx_skbuff[i]->data, + (unsigned int)priv->rx_skbuff_dma[i]); } priv->cur_rx = 0; priv->dirty_rx = (unsigned int)(i - rxsize); @@ -1244,8 +1215,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) stmmac_get_tx_hwtstamp(priv, entry, skb); } - TX_DBG("%s: curr %d, dirty %d\n", __func__, - priv->cur_tx, priv->dirty_tx); + if (netif_msg_tx_done(priv)) + pr_debug("%s: curr %d, dirty %d\n", __func__, + priv->cur_tx, priv->dirty_tx); if (likely(priv->tx_skbuff_dma[entry])) { dma_unmap_single(priv->device, @@ -1270,7 +1242,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { - TX_DBG("%s: restart transmit\n", __func__); + if (netif_msg_tx_done(priv)) + pr_debug("%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); } netif_tx_unlock(priv->dev); @@ -1579,7 +1552,7 @@ static int stmmac_open(struct net_device *dev) if (ret) { pr_err("%s: Cannot attach to PHY (error: %d)\n", 
__func__, ret); - goto open_error; + goto phy_error; } } @@ -1593,7 +1566,7 @@ static int stmmac_open(struct net_device *dev) ret = stmmac_init_dma_engine(priv); if (ret < 0) { pr_err("%s: DMA initialization failed\n", __func__); - goto open_error; + goto init_error; } /* Copy the MAC addr into the HW */ @@ -1612,7 +1585,7 @@ static int stmmac_open(struct net_device *dev) if (unlikely(ret < 0)) { pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); - goto open_error; + goto init_error; } /* Request the Wake IRQ in case of another line is used for WoL */ @@ -1622,7 +1595,7 @@ static int stmmac_open(struct net_device *dev) if (unlikely(ret < 0)) { pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n", __func__, priv->wol_irq, ret); - goto open_error_wolirq; + goto wolirq_error; } } @@ -1633,7 +1606,7 @@ static int stmmac_open(struct net_device *dev) if (unlikely(ret < 0)) { pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n", __func__, priv->lpi_irq, ret); - goto open_error_lpiirq; + goto lpiirq_error; } } @@ -1659,7 +1632,7 @@ static int stmmac_open(struct net_device *dev) pr_warn("%s: failed debugFS registration\n", __func__); #endif /* Start the ball rolling... */ - DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); + pr_debug("%s: DMA RX/TX processes started...\n", dev->name); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); @@ -1691,17 +1664,17 @@ static int stmmac_open(struct net_device *dev) return 0; -open_error_lpiirq: +lpiirq_error: if (priv->wol_irq != dev->irq) free_irq(priv->wol_irq, dev); - -open_error_wolirq: +wolirq_error: free_irq(dev->irq, dev); -open_error: +init_error: + free_dma_desc_resources(priv); if (priv->phydev) phy_disconnect(priv->phydev); - +phy_error: clk_disable_unprepare(priv->stmmac_clk); return ret; @@ -1796,16 +1769,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) entry = priv->cur_tx % txsize; -#ifdef STMMAC_XMIT_DEBUG - if ((skb->len > ETH_FRAME_LEN) || nfrags) - pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n" - "\tn_frags: %d - ip_summed: %d - %s gso\n" - "\ttx_count_frames %d\n", __func__, entry, - skb, skb->len, nopaged_len, nfrags, skb->ip_summed, - !skb_is_gso(skb) ? 
"isn't" : "is", - priv->tx_count_frames); -#endif - csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); if (priv->extend_desc) @@ -1815,12 +1778,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first = desc; -#ifdef STMMAC_XMIT_DEBUG - if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN)) - pr_debug("\tskb len: %d, nopaged_len: %d,\n" - "\t\tn_frags: %d, ip_summed: %d\n", - skb->len, nopaged_len, nfrags, skb->ip_summed); -#endif priv->tx_skbuff[entry] = skb; /* To program the descriptors according to the size of the frame */ @@ -1856,7 +1813,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) else desc = priv->dma_tx + entry; - TX_DBG("\t[entry %d] segment len: %d\n", entry, len); desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = desc->des2; @@ -1880,8 +1836,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->tx_coal_frames > priv->tx_count_frames) { priv->hw->desc->clear_tx_ic(desc); priv->xstats.tx_reset_ic_bit++; - TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry, - priv->tx_count_frames); mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); } else @@ -1893,22 +1847,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->cur_tx++; -#ifdef STMMAC_XMIT_DEBUG if (netif_msg_pktdata(priv)) { - pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", + pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", __func__, (priv->cur_tx % txsize), (priv->dirty_tx % txsize), entry, first, nfrags); + if (priv->extend_desc) stmmac_display_ring((void *)priv->dma_etx, txsize, 1); else stmmac_display_ring((void *)priv->dma_tx, txsize, 0); - pr_info(">>> frame to be transmitted: "); + pr_debug(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } -#endif if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { - TX_DBG("%s: stop transmitted packets\n", __func__); + if (netif_msg_hw(priv)) + pr_debug("%s: stop transmitted packets\n", __func__); netif_stop_queue(dev); } @@ -1968,7 +1922,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) priv->hw->ring->refill_desc3(priv, p); - RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); + if (netif_msg_rx_status(priv)) + pr_debug("\trefill entry #%d\n", entry); } wmb(); priv->hw->desc->set_rx_owner(p); @@ -1991,15 +1946,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) unsigned int count = 0; int coe = priv->plat->rx_coe; -#ifdef STMMAC_RX_DEBUG - if (netif_msg_hw(priv)) { - pr_debug(">>> stmmac_rx: descriptor ring:\n"); + if (netif_msg_rx_status(priv)) { + pr_debug("%s: descriptor ring:\n", __func__); if (priv->extend_desc) stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); else stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); } -#endif while (count < limit) { int status; struct dma_desc *p; @@ -2053,15 +2006,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) */ if (unlikely(status != llc_snap)) frame_len -= ETH_FCS_LEN; -#ifdef STMMAC_RX_DEBUG - if (frame_len > ETH_FRAME_LEN) - pr_debug("\tRX frame size %d, COE status: %d\n", - frame_len, status); - if (netif_msg_hw(priv)) + if (netif_msg_rx_status(priv)) { pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", p, entry, p->des2); -#endif + if (frame_len > ETH_FRAME_LEN) + pr_debug("\tframe size %d, COE: %d\n", + frame_len, status); + } skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { pr_err("%s: Inconsistent Rx descriptor chain\n", @@ -2078,12 
+2030,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) dma_unmap_single(priv->device, priv->rx_skbuff_dma[entry], priv->dma_buf_sz, DMA_FROM_DEVICE); -#ifdef STMMAC_RX_DEBUG + if (netif_msg_pktdata(priv)) { - pr_info(" frame received (%dbytes)", frame_len); + pr_debug("frame received (%dbytes)", frame_len); print_pkt(skb->data, frame_len); } -#endif + skb->protocol = eth_type_trans(skb, priv->dev); if (unlikely(!coe)) @@ -2562,9 +2514,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv) /* Get and dump the chip ID */ priv->synopsys_id = stmmac_get_synopsys_id(priv); - /* To use alternate (extended) or normal descriptor structures */ - stmmac_selec_desc_mode(priv); - /* To use the chained or ring mode */ if (chain_mode) { priv->hw->chain = &chain_mode_ops; @@ -2599,6 +2548,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) } else pr_info(" No HW DMA feature register supported"); + /* To use alternate (extended) or normal descriptor structures */ + stmmac_selec_desc_mode(priv); + ret = priv->hw->mac->rx_ipc(priv->ioaddr); if (!ret) { pr_warn(" RX IPC Checksum Offload not configured.\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index cc15039eaa47..fe7bc9903867 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -27,6 +27,9 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_gpio.h> + #include <asm/io.h> #include "stmmac.h" @@ -131,10 +134,46 @@ static int stmmac_mdio_reset(struct mii_bus *bus) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; + struct stmmac_mdio_bus_data *data = priv->plat->mdio_bus_data; + +#ifdef CONFIG_OF + if (priv->device->of_node) { + int reset_gpio, active_low; + + if (data->reset_gpio < 0) { + struct device_node *np = priv->device->of_node; + if (!np) + return 0; + + data->reset_gpio = of_get_named_gpio(np, + "snps,reset-gpio", 0); + if (data->reset_gpio < 0) + return 0; + + data->active_low = of_property_read_bool(np, + "snps,reset-active-low"); + of_property_read_u32_array(np, + "snps,reset-delays-us", data->delays, 3); + } + + reset_gpio = data->reset_gpio; + active_low = data->active_low; + + if (!gpio_request(reset_gpio, "mdio-reset")) { + gpio_direction_output(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[0]); + gpio_set_value(reset_gpio, active_low ? 0 : 1); + udelay(data->delays[1]); + gpio_set_value(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[2]); + gpio_free(reset_gpio); + } + } +#endif - if (priv->plat->mdio_bus_data->phy_reset) { + if (data->phy_reset) { pr_debug("stmmac_mdio_reset: calling phy_reset\n"); - priv->plat->mdio_bus_data->phy_reset(priv->plat->bsp_priv); + data->phy_reset(priv->plat->bsp_priv); } /* This is a workaround for problems with the STE101P PHY. 
@@ -172,6 +211,11 @@ int stmmac_mdio_register(struct net_device *ndev) else irqlist = priv->mii_irq; +#ifdef CONFIG_OF + if (priv->device->of_node) + mdio_bus_data->reset_gpio = -1; +#endif + new_bus->name = "stmmac"; new_bus->read = &stmmac_mdio_read; new_bus->write = &stmmac_mdio_write; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 1d3780f55ba2..03de76c7a177 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -34,12 +34,20 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { struct device_node *np = pdev->dev.of_node; + struct stmmac_dma_cfg *dma_cfg; if (!np) return -ENODEV; *mac = of_get_mac_address(np); plat->interface = of_get_phy_mode(np); + + plat->bus_id = of_alias_get_id(np, "ethernet"); + if (plat->bus_id < 0) + plat->bus_id = 0; + + of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr); + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); @@ -56,6 +64,22 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, plat->pmt = 1; } + if (of_device_is_compatible(np, "snps,dwmac-3.610") || + of_device_is_compatible(np, "snps,dwmac-3.710")) { + plat->enh_desc = 1; + plat->bugged_jumbo = 1; + plat->force_sf_dma_mode = 1; + } + + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); + if (!dma_cfg) + return -ENOMEM; + + plat->dma_cfg = dma_cfg; + of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); + dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); + return 0; } #else @@ -92,8 +116,10 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) if (IS_ERR(addr)) return PTR_ERR(addr); + plat_dat = pdev->dev.platform_data; if (pdev->dev.of_node) { - plat_dat = devm_kzalloc(&pdev->dev, + if (!plat_dat) + plat_dat = devm_kzalloc(&pdev->dev, sizeof(struct plat_stmmacenet_data), GFP_KERNEL); if (!plat_dat) { @@ -106,8 +132,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) pr_err("%s: main dt probe failed", __func__); return ret; } - } else { - plat_dat = pdev->dev.platform_data; } /* Custom initialisation (if needed)*/ @@ -171,8 +195,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) if (priv->plat->exit) priv->plat->exit(pdev); - platform_set_drvdata(pdev, NULL); - return ret; } @@ -230,7 +252,9 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops; static const struct of_device_id stmmac_dt_ids[] = { { .compatible = "st,spear600-gmac"}, + { .compatible = "snps,dwmac-3.610"}, { .compatible = "snps,dwmac-3.70a"}, + { .compatible = "snps,dwmac-3.710"}, { .compatible = "snps,dwmac"}, { /* sentinel */ } }; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 4c682a3d0424..759441b29e53 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -808,44 +808,43 @@ static int cas_reset_mii_phy(struct cas *cp) return limit <= 0; } -static int cas_saturn_firmware_init(struct cas *cp) +static void cas_saturn_firmware_init(struct cas *cp) { const struct firmware *fw; const char fw_name[] = "sun/cassini.bin"; int err; if (PHY_NS_DP83065 != cp->phy_id) - return 0; + return; err = request_firmware(&fw, fw_name, &cp->pdev->dev); if (err) { pr_err("Failed to load firmware \"%s\"\n", fw_name); - return err; + return; } if (fw->size < 2) { pr_err("bogus 
length %zu in \"%s\"\n", fw->size, fw_name); - err = -EINVAL; goto out; } cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; cp->fw_size = fw->size - 2; cp->fw_data = vmalloc(cp->fw_size); - if (!cp->fw_data) { - err = -ENOMEM; + if (!cp->fw_data) goto out; - } memcpy(cp->fw_data, &fw->data[2], cp->fw_size); out: release_firmware(fw); - return err; } static void cas_saturn_firmware_load(struct cas *cp) { int i; + if (!cp->fw_data) + return; + cas_phy_powerdown(cp); /* expanded memory access mode */ @@ -5083,8 +5082,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (cas_check_invariants(cp)) goto err_out_iounmap; if (cp->cas_flags & CAS_FLAG_SATURN) - if (cas_saturn_firmware_init(cp)) - goto err_out_iounmap; + cas_saturn_firmware_init(cp); cp->init_block = (struct cas_init_block *) pci_alloc_consistent(pdev, sizeof(struct cas_init_block), diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 95cff98d8a34..fa322409bff3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -10108,7 +10108,7 @@ static int niu_of_probe(struct platform_device *op) goto err_out_iounmap; } - dev_set_drvdata(&op->dev, dev); + platform_set_drvdata(op, dev); niu_device_announce(np); @@ -10145,7 +10145,7 @@ err_out: static int niu_of_remove(struct platform_device *op) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); if (dev) { struct niu *np = netdev_priv(dev); @@ -10175,7 +10175,6 @@ static int niu_of_remove(struct platform_device *op) niu_put_parent(np); free_netdev(dev); - dev_set_drvdata(&op->dev, NULL); } return 0; } diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 054975939a18..0d43fa9ff980 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -995,7 +995,6 @@ static void bigmac_set_multicast(struct net_device *dev) struct bigmac *bp = netdev_priv(dev); void __iomem *bregs = bp->bregs; struct netdev_hw_addr *ha; - int i; u32 tmp, crc; /* Disable the receiver. 
The bit self-clears when @@ -1017,10 +1016,7 @@ static void bigmac_set_multicast(struct net_device *dev) tmp |= BIGMAC_RXCFG_PMISC; sbus_writel(tmp, bregs + BMAC_RXCFG); } else { - u16 hash_table[4]; - - for (i = 0; i < 4; i++) - hash_table[i] = 0; + u16 hash_table[4] = { 0 }; netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 5f3f9d52757d..e62df2b81302 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -3028,15 +3028,4 @@ static struct pci_driver gem_driver = { #endif /* CONFIG_PM */ }; -static int __init gem_init(void) -{ - return pci_register_driver(&gem_driver); -} - -static void __exit gem_cleanup(void) -{ - pci_unregister_driver(&gem_driver); -} - -module_init(gem_init); -module_exit(gem_cleanup); +module_pci_driver(gem_driver); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 436fa9d5a071..171f5b0809c4 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2506,7 +2506,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child) struct quattro *qp; op = to_platform_device(parent); - qp = dev_get_drvdata(&op->dev); + qp = platform_get_drvdata(op); if (qp) return qp; @@ -2521,7 +2521,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child) qp->next = qfe_sbus_list; qfe_sbus_list = qp; - dev_set_drvdata(&op->dev, qp); + platform_set_drvdata(op, qp); } return qp; } diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 8182591bc187..b072f4dba033 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -767,7 +767,7 @@ static struct sunqec *get_qec(struct platform_device *child) struct platform_device *op = to_platform_device(child->dev.parent); struct sunqec *qecp; - qecp = dev_get_drvdata(&op->dev); + qecp = platform_get_drvdata(op); if (!qecp) { qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); if (qecp) { @@ -801,7 +801,7 @@ static struct sunqec *get_qec(struct platform_device *child) goto fail; } - dev_set_drvdata(&op->dev, qecp); + platform_set_drvdata(op, qecp); qecp->next_module = root_qec_dev; root_qec_dev = qecp; @@ -902,7 +902,7 @@ static int qec_ether_init(struct platform_device *op) if (res) goto fail; - dev_set_drvdata(&op->dev, qe); + platform_set_drvdata(op, qe); printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel, dev->dev_addr); @@ -934,7 +934,7 @@ static int qec_sbus_probe(struct platform_device *op) static int qec_sbus_remove(struct platform_device *op) { - struct sunqe *qp = dev_get_drvdata(&op->dev); + struct sunqe *qp = platform_get_drvdata(op); struct net_device *net_dev = qp->dev; unregister_netdev(net_dev); @@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op) free_netdev(net_dev); - dev_set_drvdata(&op->dev, NULL); - return 0; } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 1df0ff3839e8..3df56840a3b9 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1239,6 +1239,8 @@ static int vnet_port_remove(struct vio_dev *vdev) dev_set_drvdata(&vdev->dev, NULL); kfree(port); + + unregister_netdev(vp->dev); } return 0; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index da4415d9dee6..05a1674e204f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1555,6 +1555,8 @@ static 
int cpsw_probe_dt(struct cpsw_platform_data *data, if (mac_addr) memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + slave_data->phy_if = of_get_phy_mode(slave_node); + if (data->dual_emac) { if (of_property_read_u32(slave_node, "dual_emac_res_vlan", &prop)) { @@ -1702,10 +1704,10 @@ static int cpsw_probe(struct platform_device *pdev) if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); - pr_info("Detected MACID = %pM", priv->mac_addr); + pr_info("Detected MACID = %pM\n", priv->mac_addr); } else { eth_random_addr(priv->mac_addr); - pr_info("Random MACID = %pM", priv->mac_addr); + pr_info("Random MACID = %pM\n", priv->mac_addr); } memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); @@ -1944,7 +1946,6 @@ static int cpsw_remove(struct platform_device *pdev) struct cpsw_priv *priv = netdev_priv(ndev); int i; - platform_set_drvdata(pdev, NULL); if (priv->data.dual_emac) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 053c84fd0853..031ebc81b50c 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -64,6 +64,7 @@ #define CPDMA_DESC_TO_PORT_EN BIT(20) #define CPDMA_TO_PORT_SHIFT 16 #define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) +#define CPDMA_DESC_CRC_LEN 4 #define CPDMA_TEARDOWN_VALUE 0xfffffffc @@ -805,6 +806,10 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) status = -EBUSY; goto unlock_ret; } + + if (status & CPDMA_DESC_PASS_CRC) + outlen -= CPDMA_DESC_CRC_LEN; + status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | CPDMA_DESC_PORT_MASK); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 860e15ddfbcb..07b176bcf929 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1532,7 +1532,7 @@ static int emac_dev_open(struct net_device *ndev) struct device *emac_dev = &ndev->dev; u32 cnt; struct resource *res; - int q, m, ret; + int ret; int i = 0; int k = 0; struct emac_priv *priv = netdev_priv(ndev); @@ -1567,8 +1567,9 @@ static int emac_dev_open(struct net_device *ndev) while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { - if (request_irq(i, emac_irq, IRQF_DISABLED, - ndev->name, ndev)) + if (devm_request_irq(&priv->pdev->dev, i, emac_irq, + IRQF_DISABLED, + ndev->name, ndev)) goto rollback; } k++; @@ -1641,15 +1642,7 @@ static int emac_dev_open(struct net_device *ndev) rollback: - dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); - - for (q = k; k >= 0; k--) { - for (m = i; m >= res->start; m--) - free_irq(m, ndev); - res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1); - m = res->end; - } - + dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed"); ret = -EBUSY; err: pm_runtime_put(&priv->pdev->dev); @@ -1667,9 +1660,6 @@ err: */ static int emac_dev_stop(struct net_device *ndev) { - struct resource *res; - int i = 0; - int irq_num; struct emac_priv *priv = netdev_priv(ndev); struct device *emac_dev = &ndev->dev; @@ -1685,13 +1675,6 @@ static int emac_dev_stop(struct net_device *ndev) if (priv->phydev) phy_disconnect(priv->phydev); - /* Free IRQ */ - while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { - for (irq_num = res->start; irq_num <= res->end; irq_num++) - free_irq(irq_num, priv->ndev); - i++; - } - if (netif_msg_drv(priv)) 
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); @@ -1771,29 +1754,22 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; -#ifdef CONFIG_OF -static struct emac_platform_data - *davinci_emac_of_get_pdata(struct platform_device *pdev, - struct emac_priv *priv) +static struct emac_platform_data * +davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; struct emac_platform_data *pdata = NULL; const u8 *mac_addr; - u32 data; - int ret; - pdata = pdev->dev.platform_data; - if (!pdata) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - goto nodata; - } + if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) + return pdev->dev.platform_data; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; np = pdev->dev.of_node; - if (!np) - goto nodata; - else - pdata->version = EMAC_VERSION_2; + pdata->version = EMAC_VERSION_2; if (!is_valid_ether_addr(pdata->mac_addr)) { mac_addr = of_get_mac_address(np); @@ -1801,47 +1777,31 @@ static struct emac_platform_data memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); } - ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data); - if (!ret) - pdata->ctrl_reg_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", + &pdata->ctrl_reg_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", - &data); - if (!ret) - pdata->ctrl_mod_reg_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", + &pdata->ctrl_mod_reg_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data); - if (!ret) - pdata->ctrl_ram_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", + &pdata->ctrl_ram_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data); - if (!ret) - pdata->ctrl_ram_size = data; + of_property_read_u32(np, "ti,davinci-ctrl-ram-size", + &pdata->ctrl_ram_size); - ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data); - if (!ret) - pdata->rmii_en = data; + of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en); - ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data); - if (!ret) - pdata->no_bd_ram = data; + pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram"); priv->phy_node = of_parse_phandle(np, "phy-handle", 0); if (!priv->phy_node) pdata->phy_id = ""; pdev->dev.platform_data = pdata; -nodata: + return pdata; } -#else -static struct emac_platform_data - *davinci_emac_of_get_pdata(struct platform_device *pdev, - struct emac_priv *priv) -{ - return pdev->dev.platform_data; -} -#endif + /** * davinci_emac_probe - EMAC device probe * @pdev: The DaVinci EMAC device that we are removing @@ -1856,7 +1816,7 @@ static int davinci_emac_probe(struct platform_device *pdev) struct resource *res; struct net_device *ndev; struct emac_priv *priv; - unsigned long size, hw_ram_addr; + unsigned long hw_ram_addr; struct emac_platform_data *pdata; struct device *emac_dev; struct cpdma_params dma_params; @@ -1907,25 +1867,10 @@ static int davinci_emac_probe(struct platform_device *pdev) emac_dev = &ndev->dev; /* Get EMAC platform data */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev,"error getting res\n"); - rc = -ENOENT; - goto no_pdata; - } - priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; - size = resource_size(res); - if (!devm_request_mem_region(&pdev->dev, res->start, - size, ndev->name)) { - dev_err(&pdev->dev, "failed 
request_mem_region() for regs\n"); - rc = -ENXIO; - goto no_pdata; - } - - priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size); - if (!priv->remap_addr) { - dev_err(&pdev->dev, "unable to map IO\n"); - rc = -ENOMEM; + priv->remap_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->remap_addr)) { + rc = PTR_ERR(priv->remap_addr); goto no_pdata; } priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; @@ -2037,8 +1982,6 @@ static int davinci_emac_remove(struct platform_device *pdev) dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); - platform_set_drvdata(pdev, NULL); - if (priv->txchan) cpdma_chan_destroy(priv->txchan); if (priv->rxchan) @@ -2078,11 +2021,13 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { .resume = davinci_emac_resume, }; +#if IS_ENABLED(CONFIG_OF) static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); +#endif /* davinci_emac_driver: EMAC platform driver structure */ static struct platform_driver davinci_emac_driver = { diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index ce7c4991e41c..16ddfc348062 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -292,6 +292,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return 0; } +#if IS_ENABLED(CONFIG_OF) static int davinci_mdio_probe_dt(struct mdio_platform_data *data, struct platform_device *pdev) { @@ -309,7 +310,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, return 0; } - +#endif static int davinci_mdio_probe(struct platform_device *pdev) { @@ -487,11 +488,13 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = { .resume_early = davinci_mdio_resume, }; +#if IS_ENABLED(CONFIG_OF) static const struct of_device_id davinci_mdio_of_mtable[] = { { .compatible = "ti,davinci_mdio", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); +#endif static struct platform_driver davinci_mdio_driver = { .driver = { diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 60c400f6d01f..591437e59b90 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -372,7 +372,7 @@ static int tlan_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - pci_enable_wake(pdev, 0, 0); + pci_enable_wake(pdev, PCI_D0, 0); netif_device_attach(dev); if (netif_running(dev)) @@ -533,7 +533,6 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev, /* This is a hack. 
We need to know which board structure * is suited for this adapter */ device_id = inw(ioaddr + EISA_ID2); - priv->is_eisa = 1; if (device_id == 0x20F1) { priv->adapter = &board_info[13]; /* NetFlex-3/E */ priv->adapter_rev = 23; /* TLAN 2.3 */ diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h index 5fc98a8e4889..2eb33a250788 100644 --- a/drivers/net/ethernet/ti/tlan.h +++ b/drivers/net/ethernet/ti/tlan.h @@ -207,7 +207,6 @@ struct tlan_priv { u8 tlan_full_duplex; spinlock_t lock; u8 link; - u8 is_eisa; struct work_struct tlan_tqueue; u8 neg_be_verbose; }; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index fe256094db35..a971b9cca564 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -2209,18 +2209,6 @@ MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); module_param_named(duplex, options.duplex, int, 0); MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); -static int __init tc35815_init_module(void) -{ - return pci_register_driver(&tc35815_pci_driver); -} - -static void __exit tc35815_cleanup_module(void) -{ - pci_unregister_driver(&tc35815_pci_driver); -} - -module_init(tc35815_init_module); -module_exit(tc35815_cleanup_module); - +module_pci_driver(tc35815_pci_driver); MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 3c69a0460832..01bdc6ca0755 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1682,7 +1682,6 @@ static int tsi108_ether_remove(struct platform_device *pdev) unregister_netdev(dev); tsi108_stop_ethernet(dev); - platform_set_drvdata(pdev, NULL); iounmap(priv->regs); iounmap(priv->phyregs); free_netdev(dev); diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index 68a9ba66feba..8a049a2b4474 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -5,7 +5,6 @@ config NET_VENDOR_VIA bool "VIA devices" default y - depends on PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -22,7 +21,6 @@ config VIA_RHINE tristate "VIA Rhine support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A), @@ -45,10 +43,9 @@ config VIA_RHINE_MMIO config VIA_VELOCITY tristate "VIA Velocity support" - depends on PCI + depends on (PCI || USE_OF) select CRC32 select CRC_CCITT - select NET_CORE select MII ---help--- If you have a VIA "Velocity" based network card say Y here. 
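Note: in the davinci_emac hunks above, the switch from request_irq() to devm_request_irq() is what lets both the hand-rolled rollback loop in emac_dev_open() and the free_irq() loop in emac_dev_stop() be deleted; managed IRQs are released by the driver core when the device is unbound. A minimal sketch of the pattern (the foo_* names are illustrative, not from the patch):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct platform_device *pdev, unsigned int irq)
{
	/*
	 * The IRQ is tied to &pdev->dev and freed automatically on
	 * driver detach, so the error path needs no free_irq() loop.
	 */
	return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				dev_name(&pdev->dev), pdev);
}

One lifetime change worth noting: the IRQs are now released at driver unbind rather than in emac_dev_stop().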
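Note: the davinci_emac_of_get_pdata() rewrite leans on two properties of the OF helpers: of_property_read_u32() leaves its output untouched on failure, so optional properties can be read straight into zero-initialized pdata fields with no return-value juggling, and reading directly into a field forces the accessor width to match the field type (hence of_property_read_u8() for the u8 rmii_en). The IS_ENABLED(CONFIG_OF) test replaces the old #ifdef CONFIG_OF stub version of the function. A condensed sketch under hypothetical names:

#include <linux/of.h>
#include <linux/platform_device.h>

struct foo_platform_data {			/* hypothetical */
	u32 ctrl_reg_offset;
	u8 rmii_en;
	bool no_bd_ram;
};

static struct foo_platform_data *
foo_of_get_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct foo_platform_data *pdata;

	/* No DT node (or no OF support): fall back to board-file data. */
	if (!IS_ENABLED(CONFIG_OF) || !np)
		return pdev->dev.platform_data;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* Absent properties simply keep their zeroed defaults. */
	of_property_read_u32(np, "foo,ctrl-reg-offset",
			     &pdata->ctrl_reg_offset);
	of_property_read_u8(np, "foo,rmii-en", &pdata->rmii_en);
	pdata->no_bd_ram = of_property_read_bool(np, "foo,no-bd-ram");

	return pdata;
}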
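Note: the same probe path's move to devm_ioremap_resource() folds the resource NULL check, request_mem_region(), ioremap() and all three error messages into one call; it validates the resource (a NULL pointer included), claims and maps it, logs its own dev_err(), and reports failure as an ERR_PTR value. Sketch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/*
	 * res does not need checking here: devm_ioremap_resource()
	 * rejects a NULL resource with ERR_PTR(-EINVAL), and the
	 * mapping is released automatically on driver detach.
	 */
	return devm_ioremap_resource(&pdev->dev, res);
}

/* Caller: base = foo_map_regs(pdev); if (IS_ERR(base)) return PTR_ERR(base); */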
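Note: the tc35815 hunk is pure boilerplate removal; module_pci_driver() generates exactly the init/exit pair it deletes. For reference, the macro expands to roughly:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver foo_pci_driver;	/* stand-in */

/* module_pci_driver(foo_pci_driver) is equivalent to: */
static int __init foo_pci_driver_init(void)
{
	return pci_register_driver(&foo_pci_driver);
}
module_init(foo_pci_driver_init);

static void __exit foo_pci_driver_exit(void)
{
	pci_unregister_driver(&foo_pci_driver);
}
module_exit(foo_pci_driver_exit);

The tlan change from pci_enable_wake(pdev, 0, 0) to pci_enable_wake(pdev, PCI_D0, 0) is likewise cosmetic, since PCI_D0 is defined as 0; the PCI_D0 spelling only makes the intended power state explicit.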
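Note: a cross-cutting cleanup in this diff is the removal of platform_set_drvdata(pdev, NULL) and dev_set_drvdata(&op->dev, NULL) calls from remove paths, here (cpsw, tsi108) and later in this diff (w5100, w5300, temac, axienet): the driver core has cleared drvdata itself once no driver is bound since around v3.9, so the manual NULLing is redundant. The related temac/axienet switch from dev_set_drvdata() to platform_set_drvdata() is spelling only; essentially, from include/linux/platform_device.h:

static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

static inline void platform_set_drvdata(struct platform_device *pdev,
					void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}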
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index ca98acabf1b4..b75eb9e0e867 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1171,7 +1171,11 @@ static void alloc_rbufs(struct net_device *dev) rp->rx_skbuff_dma[i] = pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, PCI_DMA_FROMDEVICE); - + if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) { + rp->rx_skbuff_dma[i] = 0; + dev_kfree_skb(skb); + break; + } rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); } @@ -1687,6 +1691,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, rp->tx_skbuff_dma[entry] = pci_map_single(rp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { + dev_kfree_skb(skb); + rp->tx_skbuff_dma[entry] = 0; + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); } @@ -1961,6 +1971,11 @@ static int rhine_rx(struct net_device *dev, int limit) pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) { + dev_kfree_skb(skb); + rp->rx_skbuff_dma[entry] = 0; + break; + } rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); } rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index fb6248956ee2..1d6dc41f755d 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -46,6 +46,7 @@ #include <linux/types.h> #include <linux/bitops.h> #include <linux/init.h> +#include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -64,7 +65,11 @@ #include <linux/if.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/inetdevice.h> +#include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/ethtool.h> #include <linux/mii.h> @@ -79,10 +84,24 @@ #include "via-velocity.h" +enum velocity_bus_type { + BUS_PCI, + BUS_PLATFORM, +}; static int velocity_nics; static int msglevel = MSG_LEVEL_INFO; +static void velocity_set_power_state(struct velocity_info *vptr, char state) +{ + void *addr = vptr->mac_regs; + + if (vptr->pdev) + pci_set_power_state(vptr->pdev, state); + else + writeb(state, addr + 0x154); +} + /** * mac_get_cam_mask - Read a CAM mask * @regs: register block for this velocity @@ -361,12 +380,23 @@ static struct velocity_info_tbl chip_info_table[] = { * Describe the PCI device identifiers that we support in this * device driver. Used for hotplug autoloading. */ -static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = { + +static DEFINE_PCI_DEVICE_TABLE(velocity_pci_id_table) = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, { } }; -MODULE_DEVICE_TABLE(pci, velocity_id_table); +MODULE_DEVICE_TABLE(pci, velocity_pci_id_table); + +/** + * Describe the OF device identifiers that we support in this + * device driver. Used for devicetree nodes. 
+ */ +static struct of_device_id velocity_of_ids[] = { + { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, velocity_of_ids); /** * get_chip_name - identifier to name @@ -385,29 +415,6 @@ static const char *get_chip_name(enum chip_type chip_id) } /** - * velocity_remove1 - device unplug - * @pdev: PCI device being removed - * - * Device unload callback. Called on an unplug or on module - * unload for each active device that is present. Disconnects - * the device from the network layer and frees all the resources - */ -static void velocity_remove1(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct velocity_info *vptr = netdev_priv(dev); - - unregister_netdev(dev); - iounmap(vptr->mac_regs); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - free_netdev(dev); - - velocity_nics--; -} - -/** * velocity_set_int_opt - parser for integer options * @opt: pointer to option value * @val: value the user requested (or -1 for default) @@ -998,9 +1005,9 @@ static void velocity_print_link_status(struct velocity_info *vptr) { if (vptr->mii_status & VELOCITY_LINK_FAIL) { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); if (vptr->mii_status & VELOCITY_SPEED_1000) VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); @@ -1014,7 +1021,7 @@ static void velocity_print_link_status(struct velocity_info *vptr) else VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); } else { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); switch (vptr->options.spd_dpx) { case SPD_DPX_1000_FULL: VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); @@ -1180,6 +1187,17 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status) u16 BMCR; switch (PHYID_GET_PHY_ID(vptr->phy_id)) { + case PHYID_ICPLUS_IP101A: + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), + MII_ADVERTISE, vptr->mac_regs); + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, + vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, + vptr->mac_regs); + MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); + break; case PHYID_CICADA_CS8201: /* * Reset to hardware default @@ -1311,6 +1329,7 @@ static void velocity_init_registers(struct velocity_info *vptr, enum velocity_init_type type) { struct mac_regs __iomem *regs = vptr->mac_regs; + struct net_device *netdev = vptr->netdev; int i, mii_status; mac_wol_reset(regs); @@ -1319,7 +1338,7 @@ static void velocity_init_registers(struct velocity_info *vptr, case VELOCITY_INIT_RESET: case VELOCITY_INIT_WOL: - netif_stop_queue(vptr->dev); + netif_stop_queue(netdev); /* * Reset RX to prevent RX pointer not on the 4X location @@ -1332,7 +1351,7 @@ static void velocity_init_registers(struct velocity_info *vptr, if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) - netif_wake_queue(vptr->dev); + 
netif_wake_queue(netdev); } enable_flow_control_ability(vptr); @@ -1352,9 +1371,11 @@ static void velocity_init_registers(struct velocity_info *vptr, velocity_soft_reset(vptr); mdelay(5); - mac_eeprom_reload(regs); - for (i = 0; i < 6; i++) - writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); + if (!vptr->no_eeprom) { + mac_eeprom_reload(regs); + for (i = 0; i < 6; i++) + writeb(netdev->dev_addr[i], regs->PAR + i); + } /* * clear Pre_ACPI bit. @@ -1377,7 +1398,7 @@ static void velocity_init_registers(struct velocity_info *vptr, /* * Set packet filter: Receive directed and broadcast address */ - velocity_set_multi(vptr->dev); + velocity_set_multi(netdev); /* * Enable MII auto-polling @@ -1404,14 +1425,14 @@ static void velocity_init_registers(struct velocity_info *vptr, writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set); mii_status = velocity_get_opt_media_mode(vptr); - netif_stop_queue(vptr->dev); + netif_stop_queue(netdev); mii_init(vptr, mii_status); if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) - netif_wake_queue(vptr->dev); + netif_wake_queue(netdev); } enable_flow_control_ability(vptr); @@ -1459,7 +1480,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr) struct velocity_opt *opt = &vptr->options; const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); - struct pci_dev *pdev = vptr->pdev; dma_addr_t pool_dma; void *pool; unsigned int i; @@ -1467,14 +1487,14 @@ static int velocity_init_dma_rings(struct velocity_info *vptr) /* * Allocate all RD/TD rings a single pool. * - * pci_alloc_consistent() fulfills the requirement for 64 bytes + * dma_alloc_coherent() fulfills the requirement for 64 bytes * alignment */ - pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + - rx_ring_size, &pool_dma); + pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + + rx_ring_size, &pool_dma, GFP_ATOMIC); if (!pool) { - dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", - vptr->dev->name); + dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", + vptr->netdev->name); return -ENOMEM; } @@ -1514,7 +1534,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) struct rx_desc *rd = &(vptr->rx.ring[idx]); struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); - rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64); + rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); if (rd_info->skb == NULL) return -ENOMEM; @@ -1524,8 +1544,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) */ skb_reserve(rd_info->skb, 64 - ((unsigned long) rd_info->skb->data & 63)); - rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, - vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); + rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, + vptr->rx.buf_sz, DMA_FROM_DEVICE); /* * Fill in the descriptor to match @@ -1588,8 +1608,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) if (!rd_info->skb) continue; - pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, - PCI_DMA_FROMDEVICE); + dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, + DMA_FROM_DEVICE); rd_info->skb_dma = 0; dev_kfree_skb(rd_info->skb); @@ -1620,7 +1640,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) if (velocity_rx_refill(vptr) != vptr->options.numrx) { 
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR - "%s: failed to allocate RX buffer.\n", vptr->dev->name); + "%s: failed to allocate RX buffer.\n", vptr->netdev->name); velocity_free_rd_ring(vptr); goto out; } @@ -1670,7 +1690,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr) const int size = vptr->options.numrx * sizeof(struct rx_desc) + vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; - pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); + dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); } static int velocity_init_rings(struct velocity_info *vptr, int mtu) @@ -1727,8 +1747,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, pktlen = max_t(size_t, pktlen, td->td_buf[i].size & ~TD_QUEUE); - pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], - le16_to_cpu(pktlen), PCI_DMA_TODEVICE); + dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], + le16_to_cpu(pktlen), DMA_TO_DEVICE); } } dev_kfree_skb_irq(skb); @@ -1750,8 +1770,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr, if (td_info->skb) { for (i = 0; i < td_info->nskb_dma; i++) { if (td_info->skb_dma[i]) { - pci_unmap_single(vptr->pdev, td_info->skb_dma[i], - td_info->skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(vptr->dev, td_info->skb_dma[i], + td_info->skb->len, DMA_TO_DEVICE); td_info->skb_dma[i] = 0; } } @@ -1809,7 +1829,7 @@ static void velocity_error(struct velocity_info *vptr, int status) printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(®s->TDIdx[0])); BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR); writew(TRDCSR_RUN, ®s->TDCSRClr); - netif_stop_queue(vptr->dev); + netif_stop_queue(vptr->netdev); /* FIXME: port over the pci_device_failed code and use it here */ @@ -1850,10 +1870,10 @@ static void velocity_error(struct velocity_info *vptr, int status) if (linked) { vptr->mii_status &= ~VELOCITY_LINK_FAIL; - netif_carrier_on(vptr->dev); + netif_carrier_on(vptr->netdev); } else { vptr->mii_status |= VELOCITY_LINK_FAIL; - netif_carrier_off(vptr->dev); + netif_carrier_off(vptr->netdev); } velocity_print_link_status(vptr); @@ -1867,9 +1887,9 @@ static void velocity_error(struct velocity_info *vptr, int status) enable_mii_autopoll(regs); if (vptr->mii_status & VELOCITY_LINK_FAIL) - netif_stop_queue(vptr->dev); + netif_stop_queue(vptr->netdev); else - netif_wake_queue(vptr->dev); + netif_wake_queue(vptr->netdev); } if (status & ISR_MIBFI) @@ -1894,7 +1914,7 @@ static int velocity_tx_srv(struct velocity_info *vptr) int idx; int works = 0; struct velocity_td_info *tdinfo; - struct net_device_stats *stats = &vptr->dev->stats; + struct net_device_stats *stats = &vptr->netdev->stats; for (qnum = 0; qnum < vptr->tx.numq; qnum++) { for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; @@ -1939,9 +1959,9 @@ static int velocity_tx_srv(struct velocity_info *vptr) * Look to see if we should kick the transmit network * layer for more work. 
*/ - if (netif_queue_stopped(vptr->dev) && (full == 0) && + if (netif_queue_stopped(vptr->netdev) && (full == 0) && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { - netif_wake_queue(vptr->dev); + netif_wake_queue(vptr->netdev); } return works; } @@ -1989,7 +2009,7 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, if (pkt_size < rx_copybreak) { struct sk_buff *new_skb; - new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); + new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); if (new_skb) { new_skb->ip_summed = rx_skb[0]->ip_summed; skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); @@ -2029,15 +2049,14 @@ static inline void velocity_iph_realign(struct velocity_info *vptr, */ static int velocity_receive_frame(struct velocity_info *vptr, int idx) { - void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); - struct net_device_stats *stats = &vptr->dev->stats; + struct net_device_stats *stats = &vptr->netdev->stats; struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); struct rx_desc *rd = &(vptr->rx.ring[idx]); int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; struct sk_buff *skb; if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { - VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name); stats->rx_length_errors++; return -EINVAL; } @@ -2047,8 +2066,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) skb = rd_info->skb; - pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, - vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, + vptr->rx.buf_sz, DMA_FROM_DEVICE); /* * Drop frame not meeting IEEE 802.3 @@ -2061,21 +2080,20 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) } } - pci_action = pci_dma_sync_single_for_device; - velocity_rx_csum(rd, skb); if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { velocity_iph_realign(vptr, skb, pkt_len); - pci_action = pci_unmap_single; rd_info->skb = NULL; + dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, + DMA_FROM_DEVICE); + } else { + dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, + vptr->rx.buf_sz, DMA_FROM_DEVICE); } - pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, - PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len - 4); - skb->protocol = eth_type_trans(skb, vptr->dev); + skb->protocol = eth_type_trans(skb, vptr->netdev); if (rd->rdesc0.RSR & RSR_DETAG) { u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); @@ -2100,7 +2118,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) */ static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) { - struct net_device_stats *stats = &vptr->dev->stats; + struct net_device_stats *stats = &vptr->netdev->stats; int rd_curr = vptr->rx.curr; int works = 0; @@ -2235,15 +2253,15 @@ static int velocity_open(struct net_device *dev) goto out; /* Ensure chip is running */ - pci_set_power_state(vptr->pdev, PCI_D0); + velocity_set_power_state(vptr, PCI_D0); velocity_init_registers(vptr, VELOCITY_INIT_COLD); - ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, + ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED, dev->name, dev); if (ret < 0) { /* Power down the chip */ - pci_set_power_state(vptr->pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); velocity_free_rings(vptr); goto out; } @@ -2292,7 +2310,7 @@ static int 
velocity_change_mtu(struct net_device *dev, int new_mtu) if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", - vptr->dev->name); + vptr->netdev->name); ret = -EINVAL; goto out_0; } @@ -2314,8 +2332,9 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) goto out_0; } - tmp_vptr->dev = dev; + tmp_vptr->netdev = dev; tmp_vptr->pdev = vptr->pdev; + tmp_vptr->dev = vptr->dev; tmp_vptr->options = vptr->options; tmp_vptr->tx.numq = vptr->tx.numq; @@ -2415,7 +2434,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) saving then we need to bring the device back up to talk to it */ if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D0); + velocity_set_power_state(vptr, PCI_D0); switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ @@ -2428,7 +2447,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ret = -EOPNOTSUPP; } if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); return ret; @@ -2494,7 +2513,7 @@ static int velocity_close(struct net_device *dev) if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) velocity_get_ip(vptr); - free_irq(vptr->pdev->irq, dev); + free_irq(dev->irq, dev); velocity_free_rings(vptr); @@ -2550,7 +2569,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb, * add it to the transmit ring. */ tdinfo->skb = skb; - tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); + tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, + DMA_TO_DEVICE); td_ptr->tdesc0.len = cpu_to_le16(pktlen); td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); td_ptr->td_buf[0].pa_high = 0; @@ -2560,7 +2580,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev, + tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); @@ -2632,12 +2652,9 @@ static const struct net_device_ops velocity_netdev_ops = { * Set up the initial velocity_info struct for the device that has been * discovered. 
*/ -static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, - const struct velocity_info_tbl *info) +static void velocity_init_info(struct velocity_info *vptr, + const struct velocity_info_tbl *info) { - memset(vptr, 0, sizeof(struct velocity_info)); - - vptr->pdev = pdev; vptr->chip_id = info->chip_id; vptr->tx.numq = info->txqueue; vptr->multicast_limit = MCAM_SIZE; @@ -2652,10 +2669,9 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, * Retrieve the PCI configuration space data that interests us from * the kernel PCI layer */ -static int velocity_get_pci_info(struct velocity_info *vptr, - struct pci_dev *pdev) +static int velocity_get_pci_info(struct velocity_info *vptr) { - vptr->rev_id = pdev->revision; + struct pci_dev *pdev = vptr->pdev; pci_set_master(pdev); @@ -2678,7 +2694,37 @@ static int velocity_get_pci_info(struct velocity_info *vptr, dev_err(&pdev->dev, "region #1 is too small.\n"); return -EINVAL; } - vptr->pdev = pdev; + + return 0; +} + +/** + * velocity_get_platform_info - retrieve platform info for device + * @vptr: velocity device + * @pdev: platform device it matches + * + * Retrieve the Platform configuration data that interests us + */ +static int velocity_get_platform_info(struct velocity_info *vptr) +{ + struct resource res; + int ret; + + if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) + vptr->no_eeprom = 1; + + ret = of_address_to_resource(vptr->dev->of_node, 0, &res); + if (ret) { + dev_err(vptr->dev, "unable to find memory address\n"); + return ret; + } + + vptr->memaddr = res.start; + + if (resource_size(&res) < VELOCITY_IO_SIZE) { + dev_err(vptr->dev, "memory region is too small.\n"); + return -EINVAL; + } return 0; } @@ -2692,7 +2738,7 @@ static int velocity_get_pci_info(struct velocity_info *vptr, */ static void velocity_print_info(struct velocity_info *vptr) { - struct net_device *dev = vptr->dev; + struct net_device *dev = vptr->netdev; printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); printk(KERN_INFO "%s: Ethernet Address: %pM\n", @@ -2707,21 +2753,22 @@ static u32 velocity_get_link(struct net_device *dev) } /** - * velocity_found1 - set up discovered velocity card + * velocity_probe - set up discovered velocity device * @pdev: PCI device * @ent: PCI device table entry that matched + * @bustype: bus that device is connected to * * Configure a discovered adapter from scratch. Return a negative * errno error code on failure paths. */ -static int velocity_found1(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int velocity_probe(struct device *dev, int irq, + const struct velocity_info_tbl *info, + enum velocity_bus_type bustype) { static int first = 1; - struct net_device *dev; + struct net_device *netdev; int i; const char *drv_string; - const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data]; struct velocity_info *vptr; struct mac_regs __iomem *regs; int ret = -ENOMEM; @@ -2730,20 +2777,18 @@ static int velocity_found1(struct pci_dev *pdev, * can support more than MAX_UNITS. 
*/ if (velocity_nics >= MAX_UNITS) { - dev_notice(&pdev->dev, "already found %d NICs.\n", - velocity_nics); + dev_notice(dev, "already found %d NICs.\n", velocity_nics); return -ENODEV; } - dev = alloc_etherdev(sizeof(struct velocity_info)); - if (!dev) + netdev = alloc_etherdev(sizeof(struct velocity_info)); + if (!netdev) goto out; /* Chain it all together */ - SET_NETDEV_DEV(dev, &pdev->dev); - vptr = netdev_priv(dev); - + SET_NETDEV_DEV(netdev, dev); + vptr = netdev_priv(netdev); if (first) { printk(KERN_INFO "%s Ver. %s\n", @@ -2753,41 +2798,41 @@ static int velocity_found1(struct pci_dev *pdev, first = 0; } - velocity_init_info(pdev, vptr, info); - + netdev->irq = irq; + vptr->netdev = netdev; vptr->dev = dev; - ret = pci_enable_device(pdev); - if (ret < 0) - goto err_free_dev; + velocity_init_info(vptr, info); - ret = velocity_get_pci_info(vptr, pdev); - if (ret < 0) { - /* error message already printed */ - goto err_disable; - } + if (bustype == BUS_PCI) { + vptr->pdev = to_pci_dev(dev); - ret = pci_request_regions(pdev, VELOCITY_NAME); - if (ret < 0) { - dev_err(&pdev->dev, "No PCI resources.\n"); - goto err_disable; + ret = velocity_get_pci_info(vptr); + if (ret < 0) + goto err_free_dev; + } else { + vptr->pdev = NULL; + ret = velocity_get_platform_info(vptr); + if (ret < 0) + goto err_free_dev; } regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); if (regs == NULL) { ret = -EIO; - goto err_release_res; + goto err_free_dev; } vptr->mac_regs = regs; + vptr->rev_id = readb(®s->rev_id); mac_wol_reset(regs); for (i = 0; i < 6; i++) - dev->dev_addr[i] = readb(®s->PAR[i]); + netdev->dev_addr[i] = readb(®s->PAR[i]); - drv_string = dev_driver_string(&pdev->dev); + drv_string = dev_driver_string(dev); velocity_get_options(&vptr->options, velocity_nics, drv_string); @@ -2808,46 +2853,125 @@ static int velocity_found1(struct pci_dev *pdev, vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); - dev->netdev_ops = &velocity_netdev_ops; - dev->ethtool_ops = &velocity_ethtool_ops; - netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); + netdev->netdev_ops = &velocity_netdev_ops; + netdev->ethtool_ops = &velocity_ethtool_ops; + netif_napi_add(netdev, &vptr->napi, velocity_poll, + VELOCITY_NAPI_WEIGHT); - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX; - dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_IP_CSUM; - ret = register_netdev(dev); + ret = register_netdev(netdev); if (ret < 0) goto err_iounmap; - if (!velocity_get_link(dev)) { - netif_carrier_off(dev); + if (!velocity_get_link(netdev)) { + netif_carrier_off(netdev); vptr->mii_status |= VELOCITY_LINK_FAIL; } velocity_print_info(vptr); - pci_set_drvdata(pdev, dev); + dev_set_drvdata(vptr->dev, netdev); /* and leave the chip powered down */ - pci_set_power_state(pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); velocity_nics++; out: return ret; err_iounmap: iounmap(regs); -err_release_res: - pci_release_regions(pdev); -err_disable: - pci_disable_device(pdev); err_free_dev: - free_netdev(dev); + free_netdev(netdev); goto out; } -#ifdef CONFIG_PM +/** + * velocity_remove - device unplug + * @dev: device being removed + * + * Device unload callback. Called on an unplug or on module + * unload for each active device that is present. 
Disconnects + * the device from the network layer and frees all the resources + */ +static int velocity_remove(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); + + unregister_netdev(netdev); + iounmap(vptr->mac_regs); + free_netdev(netdev); + velocity_nics--; + + return 0; +} + +static int velocity_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + const struct velocity_info_tbl *info = + &chip_info_table[ent->driver_data]; + int ret; + + ret = pci_enable_device(pdev); + if (ret < 0) + return ret; + + ret = pci_request_regions(pdev, VELOCITY_NAME); + if (ret < 0) { + dev_err(&pdev->dev, "No PCI resources.\n"); + goto fail1; + } + + ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI); + if (ret == 0) + return 0; + + pci_release_regions(pdev); +fail1: + pci_disable_device(pdev); + return ret; +} + +static void velocity_pci_remove(struct pci_dev *pdev) +{ + velocity_remove(&pdev->dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int velocity_platform_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + const struct velocity_info_tbl *info; + int irq; + + of_id = of_match_device(velocity_of_ids, &pdev->dev); + if (!of_id) + return -EINVAL; + info = of_id->data; + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!irq) + return -EINVAL; + + return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM); +} + +static int velocity_platform_remove(struct platform_device *pdev) +{ + velocity_remove(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP /** * wol_calc_crc - WOL CRC * @pattern: data pattern @@ -3004,32 +3128,35 @@ static void velocity_save_context(struct velocity_info *vptr, struct velocity_co } -static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) +static int velocity_suspend(struct device *dev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct velocity_info *vptr = netdev_priv(dev); + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); unsigned long flags; - if (!netif_running(vptr->dev)) + if (!netif_running(vptr->netdev)) return 0; - netif_device_detach(vptr->dev); + netif_device_detach(vptr->netdev); spin_lock_irqsave(&vptr->lock, flags); - pci_save_state(pdev); + if (vptr->pdev) + pci_save_state(vptr->pdev); if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { velocity_get_ip(vptr); velocity_save_context(vptr, &vptr->context); velocity_shutdown(vptr); velocity_set_wol(vptr); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_set_power_state(pdev, PCI_D3hot); + if (vptr->pdev) + pci_enable_wake(vptr->pdev, PCI_D3hot, 1); + velocity_set_power_state(vptr, PCI_D3hot); } else { velocity_save_context(vptr, &vptr->context); velocity_shutdown(vptr); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (vptr->pdev) + pci_disable_device(vptr->pdev); + velocity_set_power_state(vptr, PCI_D3hot); } spin_unlock_irqrestore(&vptr->lock, flags); @@ -3071,19 +3198,22 @@ static void velocity_restore_context(struct velocity_info *vptr, struct velocity writeb(*((u8 *) (context->mac_reg + i)), ptr + i); } -static int velocity_resume(struct pci_dev *pdev) +static int velocity_resume(struct device *dev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct velocity_info *vptr = netdev_priv(dev); + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); unsigned long flags; int i; - if 
(!netif_running(vptr->dev)) + if (!netif_running(vptr->netdev)) return 0; - pci_set_power_state(pdev, PCI_D0); - pci_enable_wake(pdev, 0, 0); - pci_restore_state(pdev); + velocity_set_power_state(vptr, PCI_D0); + + if (vptr->pdev) { + pci_enable_wake(vptr->pdev, PCI_D0, 0); + pci_restore_state(vptr->pdev); + } mac_wol_reset(vptr->mac_regs); @@ -3101,27 +3231,38 @@ static int velocity_resume(struct pci_dev *pdev) mac_enable_int(vptr->mac_regs); spin_unlock_irqrestore(&vptr->lock, flags); - netif_device_attach(vptr->dev); + netif_device_attach(vptr->netdev); return 0; } -#endif +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume); /* * Definition for our device driver. The PCI layer interface * uses this to handle all our card discover and plugging */ -static struct pci_driver velocity_driver = { +static struct pci_driver velocity_pci_driver = { .name = VELOCITY_NAME, - .id_table = velocity_id_table, - .probe = velocity_found1, - .remove = velocity_remove1, -#ifdef CONFIG_PM - .suspend = velocity_suspend, - .resume = velocity_resume, -#endif + .id_table = velocity_pci_id_table, + .probe = velocity_pci_probe, + .remove = velocity_pci_remove, + .driver = { + .pm = &velocity_pm_ops, + }, }; +static struct platform_driver velocity_platform_driver = { + .probe = velocity_platform_probe, + .remove = velocity_platform_remove, + .driver = { + .name = "via-velocity", + .owner = THIS_MODULE, + .of_match_table = velocity_of_ids, + .pm = &velocity_pm_ops, + }, +}; /** * velocity_ethtool_up - pre hook for ethtool @@ -3134,7 +3275,7 @@ static int velocity_ethtool_up(struct net_device *dev) { struct velocity_info *vptr = netdev_priv(dev); if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D0); + velocity_set_power_state(vptr, PCI_D0); return 0; } @@ -3149,7 +3290,7 @@ static void velocity_ethtool_down(struct net_device *dev) { struct velocity_info *vptr = netdev_priv(dev); if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); } static int velocity_get_settings(struct net_device *dev, @@ -3269,9 +3410,14 @@ static int velocity_set_settings(struct net_device *dev, static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct velocity_info *vptr = netdev_priv(dev); + strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info)); + if (vptr->pdev) + strlcpy(info->bus_info, pci_name(vptr->pdev), + sizeof(info->bus_info)); + else + strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); } static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -3561,13 +3707,20 @@ static void velocity_unregister_notifier(void) */ static int __init velocity_init_module(void) { - int ret; + int ret_pci, ret_platform; velocity_register_notifier(); - ret = pci_register_driver(&velocity_driver); - if (ret < 0) + + ret_pci = pci_register_driver(&velocity_pci_driver); + ret_platform = platform_driver_register(&velocity_platform_driver); + + /* if both_registers failed, remove the notifier */ + if ((ret_pci < 0) && (ret_platform < 0)) { velocity_unregister_notifier(); - return ret; + return ret_pci; + } + + return 0; } /** @@ -3581,7 +3734,9 @@ static int __init velocity_init_module(void) static void __exit velocity_cleanup_module(void) { velocity_unregister_notifier(); - 
pci_unregister_driver(&velocity_driver); + + pci_unregister_driver(&velocity_pci_driver); + platform_driver_unregister(&velocity_platform_driver); } module_init(velocity_init_module); diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index 4cb9f13485e9..9453bfa9324a 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1265,7 +1265,7 @@ struct velocity_context { #define PHYID_VT3216_64BIT 0x000FC600UL #define PHYID_MARVELL_1000 0x01410C50UL #define PHYID_MARVELL_1000S 0x01410C40UL - +#define PHYID_ICPLUS_IP101A 0x02430C54UL #define PHYID_REV_ID_MASK 0x0000000FUL #define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) @@ -1434,8 +1434,10 @@ struct velocity_opt { #define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) struct velocity_info { + struct device *dev; struct pci_dev *pdev; - struct net_device *dev; + struct net_device *netdev; + int no_eeprom; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u8 ip_addr[4]; @@ -1514,7 +1516,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr) int res = -ENOENT; rcu_read_lock(); - in_dev = __in_dev_get_rcu(vptr->dev); + in_dev = __in_dev_get_rcu(vptr->netdev); if (in_dev != NULL) { ifa = (struct in_ifaddr *) in_dev->ifa_list; if (ifa != NULL) { diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a518dcab396e..30fed08d1674 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -734,7 +734,6 @@ err_hw_probe: unregister_netdev(ndev); err_register: free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return err; } @@ -750,7 +749,6 @@ static int w5100_remove(struct platform_device *pdev) unregister_netdev(ndev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 6e00e3f94ce4..e92884564e1e 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -646,7 +646,6 @@ err_hw_probe: unregister_netdev(ndev); err_register: free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return err; } @@ -662,7 +661,6 @@ static int w5300_remove(struct platform_device *pdev) unregister_netdev(ndev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 122d60c0481b..7b90a5eba099 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_XILINX bool "Xilinx devices" default y - depends on PPC || PPC32 || MICROBLAZE + depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -20,7 +20,7 @@ if NET_VENDOR_XILINX config XILINX_EMACLITE tristate "Xilinx 10/100 Ethernet Lite support" - depends on (PPC32 || MICROBLAZE) + depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ) select PHYLIB ---help--- This driver supports the 10/100 Ethernet Lite from Xilinx. 
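Note: in the via-rhine hunks above, every streaming DMA mapping now gets the dma_mapping_error() check the DMA API requires (CONFIG_DMA_API_DEBUG warns when it is missing). On the transmit path the failed packet is freed and NETDEV_TX_OK is returned: returning NETDEV_TX_BUSY would make the stack requeue an skb the driver has already freed. The TX-side shape, as a sketch:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: dma_dev/handle would come from the driver's state. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev,
				  struct device *dma_dev, dma_addr_t *handle)
{
	*handle = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, *handle)) {
		/* Drop: count it and claim success so nothing is requeued. */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* ... write *handle into a descriptor and kick the hardware ... */
	return NETDEV_TX_OK;
}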
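Note: the wholesale pci_alloc_consistent()/pci_map_single() to dma_alloc_coherent()/dma_map_single() conversion in via-velocity is what unties the driver from struct pci_dev and makes the platform-bus support possible. The pci_ variants were only shorthand: pci_alloc_consistent(pdev, size, &h) is dma_alloc_coherent(&pdev->dev, size, &h, GFP_ATOMIC). Ring allocation with the generic API, in outline:

#include <linux/dma-mapping.h>

/*
 * One coherent pool for both rings; per the driver's own comment,
 * dma_alloc_coherent() satisfies the 64-byte alignment the rings need.
 */
static void *foo_alloc_rings(struct device *dev, size_t rx_size,
			     size_t tx_size, dma_addr_t *pool_dma)
{
	/* GFP_ATOMIC mirrors what pci_alloc_consistent() implied. */
	return dma_alloc_coherent(dev, rx_size + tx_size, pool_dma,
				  GFP_ATOMIC);
}

static void foo_free_rings(struct device *dev, size_t total, void *pool,
			   dma_addr_t pool_dma)
{
	dma_free_coherent(dev, total, pool, pool_dma);
}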
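Note: velocity_receive_frame() also drops the old pci_action function-pointer trick in favour of spelling out the two legal ways to finish with a streaming RX buffer: dma_sync_single_for_cpu() before the CPU looks at the data, then either dma_unmap_single() when the skb travels up the stack, or dma_sync_single_for_device() when a copybreak copy lets the buffer be recycled. In outline:

#include <linux/dma-mapping.h>

static void foo_rx_buffer_handoff(struct device *dev, dma_addr_t dma,
				  size_t buf_sz, bool recycled)
{
	/* Make the received data visible to the CPU before reading it. */
	dma_sync_single_for_cpu(dev, dma, buf_sz, DMA_FROM_DEVICE);

	/* ... checksum check, optional copybreak copy into a fresh skb ... */

	if (recycled)
		/* Buffer stays with the driver: hand it back to the device. */
		dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
	else
		/* Buffer leaves with the skb: tear the mapping down. */
		dma_unmap_single(dev, dma, buf_sz, DMA_FROM_DEVICE);
}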
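Note: the probe split above follows the usual dual-bus shape: thin velocity_pci_probe() and velocity_platform_probe() wrappers recover the per-chip info, from pci_device_id.driver_data on one side and of_device_id.data on the other, and hand off to one bus-agnostic probe body. A self-contained sketch under hypothetical names:

#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

struct foo_info { int txqueue; };		/* hypothetical chip info */
static const struct foo_info foo_chip = { .txqueue = 1 };

static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "acme,foo", .data = &foo_chip },
	{ /* sentinel */ },
};

static int foo_probe(struct device *dev, unsigned int irq,
		     const struct foo_info *info)
{
	/* ... shared, bus-agnostic probe body ... */
	return 0;
}

static int foo_platform_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	unsigned int irq;

	/* Which table entry matched selects the chip variant. */
	of_id = of_match_device(foo_of_ids, &pdev->dev);
	if (!of_id)
		return -EINVAL;

	/* Translate the DT interrupt specifier; 0 means no valid IRQ. */
	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	return foo_probe(&pdev->dev, irq, of_id->data);
}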
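Note: converting the suspend/resume callbacks to take a struct device * is what allows a single dev_pm_ops table to serve both registrations, via .driver.pm in the pci_driver and in the platform_driver. With CONFIG_PM_SLEEP disabled, SIMPLE_DEV_PM_OPS() emits an empty table, which is why the callbacks themselves sit behind the #ifdef. The shape:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* dev works for the PCI and the platform instance alike. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

/* Empty when CONFIG_PM_SLEEP is off; otherwise wires up both callbacks. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* In both driver structs: .driver = { .pm = &foo_pm_ops, }, */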
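Note: the new velocity_init_module() treats one successful registration out of two as good enough, yet velocity_cleanup_module() unregisters both unconditionally, including a driver whose registration may have failed. A stricter alternative (hypothetical, not what the patch does) keeps registration and unregistration balanced by requiring both to succeed:

static int __init foo_init_module(void)
{
	int ret;

	ret = pci_register_driver(&foo_pci_driver);
	if (ret < 0)
		return ret;

	ret = platform_driver_register(&foo_platform_driver);
	if (ret < 0)
		pci_unregister_driver(&foo_pci_driver);	/* unwind */

	return ret;
}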
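Note: in the xilinx_emaclite hunks that follow, the in_be32()/out_be32() accessors (a powerpc/microblaze idiom) give way to __raw_readl()/__raw_writel(), which is what makes the ARCH_ZYNQ (ARM) Kconfig additions above workable. The __raw_* variants perform no byte swapping and no memory barriers, appropriate here on the assumption that the EmacLite core follows the endianness of the bus it is synthesized on. The recurring read-modify-write shape:

#include <linux/io.h>

static void foo_set_reg_bits(void __iomem *reg, u32 mask)
{
	/* Native-endian, unordered access; was in_be32()/out_be32(). */
	u32 val = __raw_readl(reg);

	__raw_writel(val | mask, reg);
}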
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 57c2e5ef2804..58eb4488beff 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1007,7 +1007,7 @@ static int temac_of_probe(struct platform_device *op) return -ENOMEM; ether_setup(ndev); - dev_set_drvdata(&op->dev, ndev); + platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; @@ -1136,7 +1136,7 @@ static int temac_of_probe(struct platform_device *op) static int temac_of_remove(struct platform_device *op) { - struct net_device *ndev = dev_get_drvdata(&op->dev); + struct net_device *ndev = platform_get_drvdata(op); struct temac_local *lp = netdev_priv(ndev); temac_mdio_teardown(lp); @@ -1145,7 +1145,6 @@ static int temac_of_remove(struct platform_device *op) if (lp->phy_node) of_node_put(lp->phy_node); lp->phy_node = NULL; - dev_set_drvdata(&op->dev, NULL); iounmap(lp->regs); if (lp->sdma_regs) iounmap(lp->sdma_regs); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 24748e8367a1..fb7d1c28a2ea 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1484,7 +1484,7 @@ static int axienet_of_probe(struct platform_device *op) return -ENOMEM; ether_setup(ndev); - dev_set_drvdata(&op->dev, ndev); + platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ @@ -1622,7 +1622,7 @@ nodev: static int axienet_of_remove(struct platform_device *op) { - struct net_device *ndev = dev_get_drvdata(&op->dev); + struct net_device *ndev = platform_get_drvdata(op); struct axienet_local *lp = netdev_priv(ndev); axienet_mdio_teardown(lp); @@ -1632,8 +1632,6 @@ static int axienet_of_remove(struct platform_device *op) of_node_put(lp->phy_node); lp->phy_node = NULL; - dev_set_drvdata(&op->dev, NULL); - iounmap(lp->regs); if (lp->dma_regs) iounmap(lp->dma_regs); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b7268b3dae77..fd4dbdae5331 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -2,9 +2,9 @@ * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. * * This is a new flat driver which is based on the original emac_lite - * driver from John Williams <john.williams@petalogix.com>. + * driver from John Williams <john.williams@xilinx.com>. * - * 2007-2009 (c) Xilinx, Inc. + * 2007 - 2013 (c) Xilinx, Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -159,34 +159,32 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) u32 reg_data; /* Enable the Tx interrupts for the first Buffer */ - reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET); - out_be32(drvdata->base_addr + XEL_TSR_OFFSET, - reg_data | XEL_TSR_XMIT_IE_MASK); + reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); + __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, + drvdata->base_addr + XEL_TSR_OFFSET); /* Enable the Tx interrupts for the second Buffer if * configured in HW */ if (drvdata->tx_ping_pong != 0) { - reg_data = in_be32(drvdata->base_addr + + reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); - out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET, - reg_data | XEL_TSR_XMIT_IE_MASK); + __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, + drvdata->base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); } /* Enable the Rx interrupts for the first buffer */ - out_be32(drvdata->base_addr + XEL_RSR_OFFSET, - XEL_RSR_RECV_IE_MASK); + __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); /* Enable the Rx interrupts for the second Buffer if * configured in HW */ if (drvdata->rx_ping_pong != 0) { - out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET, - XEL_RSR_RECV_IE_MASK); + __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); } /* Enable the Global Interrupt Enable */ - out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK); + __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } /** @@ -201,37 +199,37 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) u32 reg_data; /* Disable the Global Interrupt Enable */ - out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK); + __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); /* Disable the Tx interrupts for the first buffer */ - reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET); - out_be32(drvdata->base_addr + XEL_TSR_OFFSET, - reg_data & (~XEL_TSR_XMIT_IE_MASK)); + reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); + __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), + drvdata->base_addr + XEL_TSR_OFFSET); /* Disable the Tx interrupts for the second Buffer * if configured in HW */ if (drvdata->tx_ping_pong != 0) { - reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + + reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); - out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET, - reg_data & (~XEL_TSR_XMIT_IE_MASK)); + __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), + drvdata->base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); } /* Disable the Rx interrupts for the first buffer */ - reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET); - out_be32(drvdata->base_addr + XEL_RSR_OFFSET, - reg_data & (~XEL_RSR_RECV_IE_MASK)); + reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); + __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), + drvdata->base_addr + XEL_RSR_OFFSET); /* Disable the Rx interrupts for the second buffer * if configured in HW */ if (drvdata->rx_ping_pong != 0) { - reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + + reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); - out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET, - reg_data & 
(~XEL_RSR_RECV_IE_MASK)); + __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), + drvdata->base_addr + XEL_BUFFER_OFFSET + + XEL_RSR_OFFSET); } } @@ -351,7 +349,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, byte_count = ETH_FRAME_LEN; /* Check if the expected buffer is available */ - reg_data = in_be32(addr + XEL_TSR_OFFSET); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { @@ -364,7 +362,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); - reg_data = in_be32(addr + XEL_TSR_OFFSET); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) @@ -375,15 +373,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); - out_be32(addr + XEL_TPLR_OFFSET, (byte_count & XEL_TPLR_LENGTH_MASK)); + __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), + addr + XEL_TPLR_OFFSET); /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame * has been transmitted */ - reg_data = in_be32(addr + XEL_TSR_OFFSET); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); - out_be32(addr + XEL_TSR_OFFSET, reg_data); + __raw_writel(reg_data, addr + XEL_TSR_OFFSET); return 0; } @@ -408,7 +407,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); /* Verify which buffer has valid data */ - reg_data = in_be32(addr + XEL_RSR_OFFSET); + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { if (drvdata->rx_ping_pong != 0) @@ -425,14 +424,14 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) return 0; /* No data was available */ /* Verify that buffer has valid data */ - reg_data = in_be32(addr + XEL_RSR_OFFSET); + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) != XEL_RSR_RECV_DONE_MASK) return 0; /* No data was available */ } /* Get the protocol type of the ethernet frame that arrived */ - proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET + + proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); @@ -441,7 +440,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { if (proto_type == ETH_P_IP) { - length = ((ntohl(in_be32(addr + + length = ((ntohl(__raw_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & @@ -463,9 +462,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) data, length); /* Acknowledge the frame */ - reg_data = in_be32(addr + XEL_RSR_OFFSET); + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); reg_data &= ~XEL_RSR_RECV_DONE_MASK; - out_be32(addr + XEL_RSR_OFFSET, reg_data); + __raw_writel(reg_data, addr + XEL_RSR_OFFSET); return length; } @@ -492,14 +491,14 @@ static void xemaclite_update_address(struct net_local *drvdata, xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); - out_be32(addr + XEL_TPLR_OFFSET, ETH_ALEN); + __raw_writel(ETH_ALEN, addr + 
XEL_TPLR_OFFSET); /* Update the MAC address in the EmacLite */ - reg_data = in_be32(addr + XEL_TSR_OFFSET); - out_be32(addr + XEL_TSR_OFFSET, reg_data | XEL_TSR_PROG_MAC_ADDR); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); /* Wait for EmacLite to finish with the MAC address update */ - while ((in_be32(addr + XEL_TSR_OFFSET) & + while ((__raw_readl(addr + XEL_TSR_OFFSET) & XEL_TSR_PROG_MAC_ADDR) != 0) ; } @@ -669,31 +668,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) u32 tx_status; /* Check if there is Rx Data available */ - if ((in_be32(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) || - (in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) + if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & + XEL_RSR_RECV_DONE_MASK) || + (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK)) xemaclite_rx_handler(dev); /* Check if the Transmission for the first buffer is completed */ - tx_status = in_be32(base_addr + XEL_TSR_OFFSET); + tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; - out_be32(base_addr + XEL_TSR_OFFSET, tx_status); + __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); tx_complete = true; } /* Check if the Transmission for the second buffer is completed */ - tx_status = in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; - out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, - tx_status); + __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); tx_complete = true; } @@ -726,7 +726,7 @@ static int xemaclite_mdio_wait(struct net_local *lp) /* wait for the MDIO interface to not be busy or timeout after some time. */ - while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) & + while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (end - jiffies <= 0) { WARN_ON(1); @@ -762,17 +762,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * MDIO Address register. Set the Status bit in the MDIO Control * register to start a MDIO read transaction. */ - ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET); - out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET, - XEL_MDIOADDR_OP_MASK | - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg)); - out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, - ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK); + ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + __raw_writel(XEL_MDIOADDR_OP_MASK | + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; - rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET); + rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", @@ -809,13 +809,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, * Data register. Finally, set the Status bit in the MDIO Control * register to start a MDIO write transaction. 
*/ - ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET); - out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET, - ~XEL_MDIOADDR_OP_MASK & - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg)); - out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val); - out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, - ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK); + ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + __raw_writel(~XEL_MDIOADDR_OP_MASK & + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); + __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); return 0; } @@ -848,24 +848,39 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) int rc; struct resource res; struct device_node *np = of_get_parent(lp->phy_node); + struct device_node *npp; /* Don't register the MDIO bus if the phy_node or its parent node * can't be found. */ - if (!np) + if (!np) { + dev_err(dev, "Failed to register mdio bus.\n"); return -ENODEV; + } + npp = of_get_parent(np); + + of_address_to_resource(npp, 0, &res); + if (lp->ndev->mem_start != res.start) { + struct phy_device *phydev; + phydev = of_phy_find_device(lp->phy_node); + if (!phydev) + dev_info(dev, + "MDIO of the phy is not registered yet\n"); + return 0; + } /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. */ - out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, - XEL_MDIOCTRL_MDIOEN_MASK); + __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); bus = mdiobus_alloc(); - if (!bus) + if (!bus) { + dev_err(dev, "Failed to allocate mdiobus\n"); return -ENOMEM; + } - of_address_to_resource(np, 0, &res); snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); bus->priv = lp; @@ -879,8 +894,10 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) lp->mii_bus = bus; rc = of_mdiobus_register(bus, np); - if (rc) + if (rc) { + dev_err(dev, "Failed to register mdio bus.\n"); goto err_register; + } return 0; @@ -896,7 +913,7 @@ err_register: * There's nothing in the Emaclite device to be configured when the link * state changes. We just print the status. */ -void xemaclite_adjust_link(struct net_device *ndev) +static void xemaclite_adjust_link(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); struct phy_device *phy = lp->phy_dev; @@ -1058,13 +1075,14 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) * This function un maps the IO region of the Emaclite device and frees the net * device. 
  */
-static void xemaclite_remove_ndev(struct net_device *ndev)
+static void xemaclite_remove_ndev(struct net_device *ndev,
+				  struct platform_device *pdev)
 {
 	if (ndev) {
 		struct net_local *lp = netdev_priv(ndev);
 
 		if (lp->base_addr)
-			iounmap((void __iomem __force *) (lp->base_addr));
+			devm_iounmap(&pdev->dev, lp->base_addr);
 
 		free_netdev(ndev);
 	}
 }
@@ -1110,8 +1128,7 @@ static struct net_device_ops xemaclite_netdev_ops;
  */
 static int xemaclite_of_probe(struct platform_device *ofdev)
 {
-	struct resource r_irq; /* Interrupt resources */
-	struct resource r_mem; /* IO mem resources */
+	struct resource *res;
 	struct net_device *ndev = NULL;
 	struct net_local *lp = NULL;
 	struct device *dev = &ofdev->dev;
@@ -1121,20 +1138,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 
 	dev_info(dev, "Device Tree Probing\n");
 
-	/* Get iospace for the device */
-	rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
-	if (rc) {
-		dev_err(dev, "invalid address\n");
-		return rc;
-	}
-
-	/* Get IRQ for the device */
-	rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
-	if (!rc) {
-		dev_err(dev, "no IRQ found\n");
-		return rc;
-	}
-
 	/* Create an ethernet device instance */
 	ndev = alloc_etherdev(sizeof(struct net_local));
 	if (!ndev)
@@ -1143,30 +1146,28 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 	dev_set_drvdata(dev, ndev);
 	SET_NETDEV_DEV(ndev, &ofdev->dev);
 
-	ndev->irq = r_irq.start;
-	ndev->mem_start = r_mem.start;
-	ndev->mem_end = r_mem.end;
-
 	lp = netdev_priv(ndev);
 	lp->ndev = ndev;
 
-	if (!request_mem_region(ndev->mem_start,
-				ndev->mem_end - ndev->mem_start + 1,
-				DRIVER_NAME)) {
-		dev_err(dev, "Couldn't lock memory region at %p\n",
-			(void *)ndev->mem_start);
-		rc = -EBUSY;
-		goto error2;
+	/* Get IRQ for the device */
+	res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "no IRQ found\n");
+		goto error;
 	}
 
-	/* Get the virtual base address for the device */
-	lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
-	if (NULL == lp->base_addr) {
-		dev_err(dev, "EmacLite: Could not allocate iomem\n");
-		rc = -EIO;
-		goto error1;
+	ndev->irq = res->start;
+
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+	lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
+	if (IS_ERR(lp->base_addr)) {
+		rc = PTR_ERR(lp->base_addr);
+		goto error;
 	}
 
+	ndev->mem_start = res->start;
+	ndev->mem_end = res->end;
+
 	spin_lock_init(&lp->reset_lock);
 	lp->next_tx_buf_to_use = 0x0;
 	lp->next_rx_buf_to_use = 0x0;
@@ -1181,8 +1182,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 		dev_warn(dev, "No MAC address found\n");
 
 	/* Clear the Tx CSR's in case this is a restart */
-	out_be32(lp->base_addr + XEL_TSR_OFFSET, 0);
-	out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
+	__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+	__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 
 	/* Set the MAC address in the EmacLite device */
 	xemaclite_update_address(lp, ndev->dev_addr);
@@ -1203,7 +1204,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 
 	if (rc) {
 		dev_err(dev, "Cannot register network device, aborting\n");
-		goto error1;
+		goto error;
 	}
 
 	dev_info(dev,
@@ -1212,11 +1213,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 		 (unsigned int __force)lp->base_addr, ndev->irq);
 	return 0;
 
-error1:
-	release_mem_region(ndev->mem_start, resource_size(&r_mem));
-
-error2:
-	xemaclite_remove_ndev(ndev);
+error:
+	xemaclite_remove_ndev(ndev, ofdev);
 	return rc;
 }
 
@@ -1251,9 +1249,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
 	of_node_put(lp->phy_node);
 	lp->phy_node = NULL;
 
-	release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
-
-	xemaclite_remove_ndev(ndev);
+	xemaclite_remove_ndev(ndev, of_dev);
 
 	dev_set_drvdata(dev, NULL);
 
 	return 0;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 6958a5e87703..3d689fcb7917 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1472,7 +1472,6 @@ err_phy_dis:
 	phy_disconnect(port->phydev);
 err_free_mem:
 	npe_port_tab[NPE_ID(port->id)] = NULL;
-	platform_set_drvdata(pdev, NULL);
 	release_resource(port->mem_res);
 err_npe_rel:
 	npe_release(port->npe);
@@ -1489,7 +1488,6 @@ static int eth_remove_one(struct platform_device *pdev)
 	unregister_netdev(dev);
 	phy_disconnect(port->phydev);
 	npe_port_tab[NPE_ID(port->id)] = NULL;
-	platform_set_drvdata(pdev, NULL);
 	npe_release(port->npe);
 	release_resource(port->mem_res);
 	free_netdev(dev);
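
The xilinx emaclite hunks above swap the powerpc-only in_be32()/out_be32() accessors for __raw_readl()/__raw_writel(), which do native-endian MMIO with no byte swap and no barrier, letting the driver build on MicroBlaze and ARM as well. A minimal sketch of the resulting register-poll pattern, modeled on xemaclite_mdio_wait(); the XEL_* names follow the driver, but the numeric values and the poll_mdio_idle() helper are illustrative assumptions, not the driver's definitions:

	#include <linux/delay.h>	/* msleep */
	#include <linux/errno.h>	/* ETIMEDOUT */
	#include <linux/io.h>		/* __raw_readl */
	#include <linux/jiffies.h>	/* jiffies, time_after */

	#define XEL_MDIOCTRL_OFFSET		0x07F0		/* illustrative offset */
	#define XEL_MDIOCTRL_MDIOSTS_MASK	0x00000001	/* transaction busy */

	/* Hypothetical helper: spin until the MDIO engine goes idle or we
	 * time out. __raw_readl() reads the register in bus-native byte
	 * order and inserts no memory barrier, which is what the
	 * in_be32() -> __raw_readl() conversion above relies on.
	 */
	static int poll_mdio_idle(void __iomem *base_addr)
	{
		unsigned long end = jiffies + 2;

		while (__raw_readl(base_addr + XEL_MDIOCTRL_OFFSET) &
		       XEL_MDIOCTRL_MDIOSTS_MASK) {
			if (time_after(jiffies, end))
				return -ETIMEDOUT;
			msleep(1);	/* yield while the PHY finishes */
		}
		return 0;
	}

Note the sketch compares against the deadline with time_after() rather than the driver's raw "end - jiffies <= 0" subtraction; time_after() is the idiomatic, wraparound-safe way to test a jiffies deadline.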
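The probe rework collapses the of_address_to_resource() + request_mem_region() + ioremap() sequence, along with the error1/error2 unwind labels, into platform_get_resource() + devm_ioremap_resource(), which requests and maps the region in one managed call and releases it automatically when the device is unbound. A minimal sketch of that pattern; foo_probe() is a made-up driver and only the kernel APIs are real:

	#include <linux/err.h>			/* IS_ERR, PTR_ERR */
	#include <linux/io.h>			/* __iomem */
	#include <linux/platform_device.h>	/* platform_get_resource */

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		/* devm_ioremap_resource() checks res for NULL, requests the
		 * region and maps it; on any failure it returns an ERR_PTR,
		 * so the old separate -EBUSY and -EIO branches collapse
		 * into a single test. */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... program the device through 'base' ... */
		return 0;
	}

Because the mapping is device-managed, the remove path no longer needs a matching release_mem_region(), which is why that call disappears from xemaclite_of_remove() above.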
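The ixp4xx_eth hunks drop platform_set_drvdata(pdev, NULL) from the error and remove paths. This relies on the driver core of this era clearing drvdata itself when a device is unbound, so NULLing it by hand is dead code. A sketch of the slimmed-down remove callback; foo_remove() is a made-up name and eth_remove_one()'s NPE and resource teardown is elided:

	#include <linux/netdevice.h>		/* unregister_netdev, free_netdev */
	#include <linux/platform_device.h>	/* platform_get_drvdata */

	static int foo_remove(struct platform_device *pdev)
	{
		struct net_device *dev = platform_get_drvdata(pdev);

		unregister_netdev(dev);
		free_netdev(dev);

		/* No platform_set_drvdata(pdev, NULL) here: the driver
		 * core resets drvdata after this callback returns. */
		return 0;
	}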