Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig          |   11
-rw-r--r--  drivers/net/ethernet/sfc/Makefile         |    7
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h       |    8
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c           | 3059
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h      |  415
-rw-r--r--  drivers/net/ethernet/sfc/efx.c            |  500
-rw-r--r--  drivers/net/ethernet/sfc/efx.h            |  129
-rw-r--r--  drivers/net/ethernet/sfc/enum.h           |   10
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c        |  399
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c         | 1171
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c  |    4
-rw-r--r--  drivers/net/ethernet/sfc/falcon_xmac.c    |  362
-rw-r--r--  drivers/net/ethernet/sfc/farch.c          | 2942
-rw-r--r--  drivers/net/ethernet/sfc/farch_regs.h (renamed from drivers/net/ethernet/sfc/regs.h) |  272
-rw-r--r--  drivers/net/ethernet/sfc/filter.c         | 1274
-rw-r--r--  drivers/net/ethernet/sfc/filter.h         |  238
-rw-r--r--  drivers/net/ethernet/sfc/io.h             |   50
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c           | 1262
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h           |  313
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mac.c       |  130
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mon.c       |  274
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h      | 5540
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c (renamed from drivers/net/ethernet/sfc/mcdi_phy.c) |  347
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.c       |    2
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.h       |    2
-rw-r--r--  drivers/net/ethernet/sfc/mtd.c            |  634
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h     |  408
-rw-r--r--  drivers/net/ethernet/sfc/nic.c            | 1902
-rw-r--r--  drivers/net/ethernet/sfc/nic.h            |  542
-rw-r--r--  drivers/net/ethernet/sfc/phy.h            |   19
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c            |   95
-rw-r--r--  drivers/net/ethernet/sfc/qt202x_phy.c     |    4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c             |  176
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c       |   15
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h       |    4
-rw-r--r--  drivers/net/ethernet/sfc/siena.c          |  711
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c    |  102
-rw-r--r--  drivers/net/ethernet/sfc/spi.h            |   99
-rw-r--r--  drivers/net/ethernet/sfc/tenxpress.c      |    2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c             |   35
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c   |    2
-rw-r--r--  drivers/net/ethernet/sfc/vfdi.h           |    2
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h    |   22
43 files changed, 16635 insertions(+), 6860 deletions(-)
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4136ccc4a954..088921294448 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,5 +1,5 @@
config SFC
- tristate "Solarflare SFC4000/SFC9000-family support"
+ tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
depends on PCI
select MDIO
select CRC32
@@ -7,13 +7,14 @@ config SFC
select I2C_ALGOBIT
select PTP_1588_CLOCK
---help---
- This driver supports 10-gigabit Ethernet cards based on
- the Solarflare SFC4000 and SFC9000-family controllers.
+ This driver supports 10/40-gigabit Ethernet cards based on
+ the Solarflare SFC4000, SFC9000-family and SFC9100-family
+ controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
- bool "Solarflare SFC4000/SFC9000-family MTD support"
+ bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---
@@ -21,7 +22,7 @@ config SFC_MTD
(e.g. /dev/mtd1). This is required to update the firmware or
the boot configuration under Linux.
config SFC_MCDI_MON
- bool "Solarflare SFC9000-family hwmon support"
+ bool "Solarflare SFC9000/SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
default y
---help---
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 945bf06e69ef..3a83c0dca8e6 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,8 +1,7 @@
-sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
- falcon_xmac.o mcdi_mac.o \
- selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
+ rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
+ mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 5400a33f254f..17d83f37fbf2 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,10 @@
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
#define EFX_DWORD_0_LBN 0
#define EFX_DWORD_0_WIDTH 32
#define EFX_DWORD_1_LBN 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 000000000000..9f18ae984f9e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3059 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef10_regs.h"
+#include "io.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "workarounds.h"
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Hardware control for EF10 architecture including 'Huntington'. */
+
+#define EFX_EF10_DRVGEN_EV 7
+enum {
+ EFX_EF10_TEST = 1,
+ EFX_EF10_REFILL,
+};
+
+/* The reserved RSS context value */
+#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+
+/* The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define HUNT_FILTER_TBL_ROWS 8192
+
+struct efx_ef10_filter_table {
+/* The RX match field masks supported by this fw & hw, in order of priority */
+ enum efx_filter_match_flags rx_match_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+ unsigned int rx_match_count;
+
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* BUSY flag indicates that an update is in progress. STACK_OLD is
+ * used to mark and sweep stack-owned MAC filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1UL
+#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+ wait_queue_head_t waitq;
+/* Shadow of net_device address lists, guarded by mac_lock */
+#define EFX_EF10_FILTER_STACK_UC_MAX 32
+#define EFX_EF10_FILTER_STACK_MC_MAX 256
+ struct {
+ u8 addr[ETH_ALEN];
+ u16 id;
+ } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
+ stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
+ int stack_uc_count; /* negative for PROMISC */
+ int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+};
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
+static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+
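+/* The MC software status register packs two 16-bit fields: EFX_WORD_1
+ * holds the magic value 0xb007 (presumably "boot") while the count is
+ * valid, and EFX_WORD_0 holds the warm boot count itself.  Any other
+ * magic means the MC is still (re)booting.
+ */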
+static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
+ return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+ EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+}
+
+static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
+{
+ return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+}
+
+static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf)) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to read datapath firmware capabilities\n");
+ return -EIO;
+ }
+
+ nic_data->datapath_caps =
+ MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+ netif_err(efx, drv, efx->net_dev,
+ "current firmware does not support TSO\n");
+ return -ENODEV;
+ }
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+ netif_err(efx, probe, efx->net_dev,
+ "current firmware does not support an RX prefix\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+ rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ return rc > 0 ? rc : -ERANGE;
+}
+
+static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+ return -EIO;
+
+ memcpy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+ return 0;
+}
+
+static int efx_ef10_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data;
+ int i, rc;
+
+ /* We can have one VI for each 8K region. However we need
+ * multiple TX queues per channel.
+ */
+ efx->max_channels =
+ min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ BUG_ON(efx->max_channels == 0);
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+ 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
+ if (rc)
+ goto fail1;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ goto fail2;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */
+ _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
+
+ rc = efx_mcdi_init(efx);
+ if (rc)
+ goto fail2;
+
+ /* Reset (most) configuration for this function */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ goto fail3;
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail3;
+
+ efx->rx_packet_len_offset =
+ ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->port_num = rc;
+
+ rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_get_sysclk_freq(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->timer_quantum_ns = 1536000 / rc; /* 1536 sysclk cycles; SYS_FREQ is reported in MHz */
+
+ /* Check whether firmware supports bug 35388 workaround */
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ if (rc == 0)
+ nic_data->workaround_35388 = true;
+ else if (rc != -ENOSYS && rc != -ENOENT)
+ goto fail3;
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 35388 is %sabled\n",
+ nic_data->workaround_35388 ? "en" : "dis");
+
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc)
+ goto fail3;
+
+ return 0;
+
+fail3:
+ efx_mcdi_fini(efx);
+fail2:
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+fail1:
+ kfree(nic_data);
+ efx->nic_data = NULL;
+ return rc;
+}
+
+static int efx_ef10_free_vis(struct efx_nic *efx)
+{
+ int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ return rc;
+}
+
+static void efx_ef10_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ efx_mcdi_mon_remove(efx);
+
+ /* This needs to be after efx_ptp_remove_channel() with no filters */
+ efx_ef10_rx_free_indir_table(efx);
+
+ rc = efx_ef10_free_vis(efx);
+ WARN_ON(rc != 0);
+
+ efx_mcdi_fini(efx);
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+ kfree(nic_data);
+}
+
+static int efx_ef10_alloc_vis(struct efx_nic *efx,
+ unsigned int min_vis, unsigned int max_vis)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
+
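+/* Each VI provides one event queue, one RX queue and one TX queue, so
+ * we need enough VIs to cover every channel's EVQ/RXQ pair and every
+ * TX queue (EFX_TXQ_TYPES per TX channel), whichever needs more.
+ */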
+static int efx_ef10_dimension_resources(struct efx_nic *efx)
+{
+ unsigned int n_vis =
+ max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+ return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+}
+
+static int efx_ef10_init_nic(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (nic_data->must_check_datapath_caps) {
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc)
+ return rc;
+ nic_data->must_check_datapath_caps = false;
+ }
+
+ if (nic_data->must_realloc_vis) {
+ /* We cannot let the number of VIs change now */
+ rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
+ nic_data->n_allocated_vis);
+ if (rc)
+ return rc;
+ nic_data->must_realloc_vis = false;
+ }
+
+ efx_ef10_rx_push_indir_table(efx);
+ return 0;
+}
+
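+/* Map the ethtool reset flags in *flags to an EFX reset type.  The
+ * flags covered by the chosen reset are cleared from *flags, so the
+ * caller can tell which requested components remain unhandled.
+ */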
+static int efx_ef10_map_reset_flags(u32 *flags)
+{
+ enum {
+ EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
+ ETH_RESET_SHARED_SHIFT),
+ EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY | ETH_RESET_MGMT) <<
+ ETH_RESET_SHARED_SHIFT)
+ };
+
+ /* We assume for now that our PCI function is permitted to
+ * reset everything.
+ */
+
+ if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
+ *flags &= ~EF10_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
+ *flags &= ~EF10_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
+#define EF10_DMA_STAT(ext_name, mcdi_name) \
+ [EF10_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
+ [EF10_STAT_ ## int_name] = \
+ { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_OTHER_STAT(ext_name) \
+ [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
+ EF10_DMA_STAT(tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(tx_packets, TX_PKTS),
+ EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(rx_good_bytes),
+ EF10_OTHER_STAT(rx_bad_bytes),
+ EF10_DMA_STAT(rx_packets, RX_PKTS),
+ EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+};
+
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
+ (1ULL << EF10_STAT_tx_packets) | \
+ (1ULL << EF10_STAT_tx_pause) | \
+ (1ULL << EF10_STAT_tx_unicast) | \
+ (1ULL << EF10_STAT_tx_multicast) | \
+ (1ULL << EF10_STAT_tx_broadcast) | \
+ (1ULL << EF10_STAT_rx_bytes) | \
+ (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_rx_good_bytes) | \
+ (1ULL << EF10_STAT_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_rx_packets) | \
+ (1ULL << EF10_STAT_rx_good) | \
+ (1ULL << EF10_STAT_rx_bad) | \
+ (1ULL << EF10_STAT_rx_pause) | \
+ (1ULL << EF10_STAT_rx_control) | \
+ (1ULL << EF10_STAT_rx_unicast) | \
+ (1ULL << EF10_STAT_rx_multicast) | \
+ (1ULL << EF10_STAT_rx_broadcast) | \
+ (1ULL << EF10_STAT_rx_lt64) | \
+ (1ULL << EF10_STAT_rx_64) | \
+ (1ULL << EF10_STAT_rx_65_to_127) | \
+ (1ULL << EF10_STAT_rx_128_to_255) | \
+ (1ULL << EF10_STAT_rx_256_to_511) | \
+ (1ULL << EF10_STAT_rx_512_to_1023) | \
+ (1ULL << EF10_STAT_rx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
+ (1ULL << EF10_STAT_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_overflow) | \
+ (1ULL << EF10_STAT_rx_nodesc_drops))
+
+/* These statistics are only provided by the 10G MAC. For a 10G/40G
+ * switchable port we do not expose these because they might not
+ * include all the packets they should.
+ */
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
+ (1ULL << EF10_STAT_tx_lt64) | \
+ (1ULL << EF10_STAT_tx_64) | \
+ (1ULL << EF10_STAT_tx_65_to_127) | \
+ (1ULL << EF10_STAT_tx_128_to_255) | \
+ (1ULL << EF10_STAT_tx_256_to_511) | \
+ (1ULL << EF10_STAT_tx_512_to_1023) | \
+ (1ULL << EF10_STAT_tx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+
+/* These statistics are only provided by the 40G MAC. For a 10G/40G
+ * switchable port we do expose these because the errors will otherwise
+ * be silent.
+ */
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
+ (1ULL << EF10_STAT_rx_length_error))
+
+#if BITS_PER_LONG == 64
+#define STAT_MASK_BITMAP(bits) (bits)
+#else
+#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
+#endif
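+/* STAT_MASK_BITMAP() initialises an unsigned long bitmap from a 64-bit
+ * mask.  On 64-bit builds a single word holds all 64 bits; on 32-bit
+ * builds the macro deliberately expands to two comma-separated
+ * initialisers (low dword, then high dword) so that the masks below
+ * can still be scanned with for_each_set_bit().
+ */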
+
+static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
+{
+ static const unsigned long hunt_40g_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_40G_EXTRA_STAT_MASK)
+ };
+ static const unsigned long hunt_10g_only_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_10G_ONLY_STAT_MASK)
+ };
+ u32 port_caps = efx_mcdi_phy_get_caps(efx);
+
+ if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ return hunt_40g_stat_mask;
+ else
+ return hunt_10g_only_stat_mask;
+}
+
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+ efx_ef10_stat_mask(efx), names);
+}
+
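+/* The MC DMAs MAC statistics into stats_buffer bracketed by generation
+ * counts: it bumps GENERATION_START before writing the statistics and
+ * GENERATION_END afterwards.  We read GENERATION_END first and
+ * GENERATION_START last; if the two match, the snapshot is consistent,
+ * otherwise a DMA was in progress and the caller must retry.
+ */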
+static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ __le64 *dma_stats;
+
+ dma_stats = efx->stats_buffer.addr;
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+ stats, efx->stats_buffer.addr, false);
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ /* Update derived statistics */
+ stats[EF10_STAT_rx_good_bytes] =
+ stats[EF10_STAT_rx_bytes] -
+ stats[EF10_STAT_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
+ stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+
+ return 0;
+}
+
+static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ const unsigned long *mask = efx_ef10_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us)
+ */
+ for (retry = 0; retry < 100; ++retry) {
+ if (efx_ef10_try_update_nic_stats(efx) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_rx_gtjumbo] +
+ stats[EF10_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+ }
+
+ return stats_count;
+}
+
+static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int mode, value;
+ efx_dword_t timer_cmd;
+
+ if (channel->irq_moderation) {
+ mode = 3;
+ value = channel->irq_moderation - 1;
+ } else {
+ mode = 0;
+ value = 0;
+ }
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
+ channel->channel);
+ }
+}
+
+static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
+{
+ if (type != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void efx_ef10_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(pdu, hdr, hdr_len);
+ memcpy(pdu + hdr_len, sdu, sdu_len);
+ wmb();
+
+ /* The hardware provides 'low' and 'high' (doorbell) registers
+ * for passing the 64-bit address of an MCDI request to
+ * firmware. However the dwords are swapped by firmware. The
+ * least significant bits of the doorbell are then 0 for all
+ * MCDI requests due to alignment.
+ */
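+ /* Illustration: with a DMA address of 0x0000123456788000, the
+ * low-word register is written with 0x00001234 and the high
+ * (doorbell) register with 0x56788000.
+ */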
+ _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
+ ER_DZ_MC_DB_LWRD);
+ _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
+ ER_DZ_MC_DB_HWRD);
+}
+
+static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
+
+ rmb();
+ return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void
+efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc < 0) {
+ /* The firmware is presumably in the process of
+ * rebooting. However, we are supposed to report each
+ * reboot just once, so we must only do that once we
+ * can read and store the updated warm boot count.
+ */
+ return 0;
+ }
+
+ if (rc == nic_data->warm_boot_count)
+ return 0;
+
+ nic_data->warm_boot_count = rc;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* The datapath firmware might have been changed */
+ nic_data->must_check_datapath_caps = true;
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * statistic that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+
+ return -EIO;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+ if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ /* Note test interrupts */
+ if (context->index == efx->irq_level)
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
+ queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
+
+ if (queues == 0)
+ return IRQ_NONE;
+
+ if (likely(soft_enabled)) {
+ /* Note test interrupts */
+ if (queues & (1U << efx->irq_level))
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return IRQ_HANDLED;
+}
+
+static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+ (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ (tx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+/* This writes to the TX_DESC_WPTR and also pushes data */
+static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned int write_ptr;
+ efx_oword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ efx_qword_t *txd;
+ int rc;
+ int i;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* A previous user of this TX queue might have set us up the
+ * bomb by writing a descriptor to the TX push collector but
+ * not the doorbell. (Each collector belongs to a port, not a
+ * queue or function, so cannot easily be reset.) We must
+ * attempt to push a no-op descriptor in its place.
+ */
+ tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
+ tx_queue->insert_count = 1;
+ txd = efx_tx_desc(tx_queue, 0);
+ EFX_POPULATE_QWORD_4(*txd,
+ ESF_DZ_TX_DESC_IS_OPT, true,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ tx_queue->write_count = 1;
+ wmb();
+ efx_ef10_push_tx_desc(tx_queue, txd);
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned int write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
+{
+ unsigned int old_write_count = tx_queue->write_count;
+ struct efx_tx_buffer *buffer;
+ unsigned int write_ptr;
+ efx_qword_t *txd;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ /* Create TX descriptor ring entry */
+ if (buffer->flags & EFX_TX_BUF_OPTION) {
+ *txd = buffer->option;
+ } else {
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_3(
+ *txd,
+ ESF_DZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
+ ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ }
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_ef10_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_ef10_notify_tx_desc(tx_queue);
+ }
+}
+
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
+ EFX_MAX_CHANNELS);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ return 0;
+}
+
+static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc != 0);
+}
+
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) efx->rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
+ efx->rx_hash_key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
+ rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+ }
+
+ rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_2(*rxd,
+ ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
+ ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int write_count;
+ efx_dword_t reg;
+
+ /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
+ write_count = rx_queue->added_count & ~7;
+ if (rx_queue->notified_count == write_count)
+ return;
+
+ do
+ efx_ef10_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ while (++rx_queue->notified_count != write_count);
+
+ wmb();
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
+ write_count & rx_queue->ptr_mask);
+ efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
+ efx_rx_queue_index(rx_queue));
+}
+
+static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
+
+static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_REFILL);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
+ inbuf, sizeof(inbuf), 0,
+ efx_ef10_rx_defer_refill_complete, 0);
+}
+
+static void
+efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ /* nothing to do */
+}
+
+static int efx_ef10_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static int efx_ef10_ev_init(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ struct efx_ef10_nic_data *nic_data;
+ bool supports_rx_merge;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ nic_data = efx->nic_data;
+ supports_rx_merge =
+ !!(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* IRQ return is ignored */
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
+ unsigned int rx_queue_label)
+{
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "rx event arrived on queue %d labeled as queue %u\n",
+ efx_rx_queue_index(rx_queue), rx_queue_label);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+static void
+efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
+ unsigned int actual, unsigned int expected)
+{
+ unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, actual, expected);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+/* partially received RX was aborted. clean up. */
+static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
+{
+ unsigned int rx_desc_ptr;
+
+ WARN_ON(rx_queue->scatter_n == 0);
+
+ netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
+ "scattered RX aborted (dropping %u buffers)\n",
+ rx_queue->scatter_n);
+
+ rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
+
+ efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
+ 0, EFX_RX_PKT_DISCARD);
+
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+ ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
+}
+
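+/* Each RX completion event carries only the low bits of the next RX
+ * descriptor pointer, so the number of descriptors completed since the
+ * last event is (next_ptr_lbits - removed_count) modulo the field
+ * width.  A single event then describes either one more scatter
+ * fragment of the current packet or a merged batch of whole
+ * single-descriptor packets.
+ */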
+static int efx_ef10_handle_rx_event(struct efx_channel *channel,
+ const efx_qword_t *event)
+{
+ unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
+ unsigned int n_descs, n_packets, i;
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue;
+ bool rx_cont;
+ u16 flags = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ /* Basic packet information */
+ rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
+ next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
+ rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
+
+ WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
+ efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
+
+ n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+
+ if (n_descs != rx_queue->scatter_n + 1) {
+ /* detect rx abort */
+ if (unlikely(n_descs == rx_queue->scatter_n)) {
+ WARN_ON(rx_bytes != 0);
+ efx_ef10_handle_rx_abort(rx_queue);
+ return 0;
+ }
+
+ if (unlikely(rx_queue->scatter_n != 0)) {
+ /* Scattered packet completions cannot be
+ * merged, so something has gone wrong.
+ */
+ efx_ef10_handle_rx_bad_lbits(
+ rx_queue, next_ptr_lbits,
+ (rx_queue->removed_count +
+ rx_queue->scatter_n + 1) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+ return 0;
+ }
+
+ /* Merged completion for multiple non-scattered packets */
+ rx_queue->scatter_n = 1;
+ rx_queue->scatter_len = 0;
+ n_packets = n_descs;
+ ++channel->n_rx_merge_events;
+ channel->n_rx_merge_packets += n_packets;
+ flags |= EFX_RX_PKT_PREFIX_LEN;
+ } else {
+ ++rx_queue->scatter_n;
+ rx_queue->scatter_len += rx_bytes;
+ if (rx_cont)
+ return 0;
+ n_packets = 1;
+ }
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
+ flags |= EFX_RX_PKT_DISCARD;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
+ channel->n_rx_ip_hdr_chksum_err += n_packets;
+ } else if (unlikely(EFX_QWORD_FIELD(*event,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
+ channel->n_rx_tcp_udp_chksum_err += n_packets;
+ } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
+ flags |= EFX_RX_PKT_CSUMMED;
+ }
+
+ if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ flags |= EFX_RX_PKT_TCP;
+
+ channel->irq_mod_score += 2 * n_packets;
+
+ /* Handle received packet(s) */
+ for (i = 0; i < n_packets; i++) {
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_queue->scatter_len,
+ flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ }
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ return n_packets;
+}
+
+static int
+efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue;
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ int tx_descs = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
+ return 0;
+
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+ tx_queue = efx_channel_get_tx_queue(channel,
+ tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+
+ return tx_descs;
+}
+
+static void
+efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
+
+ switch (subcode) {
+ case ESE_DZ_DRV_TIMER_EV:
+ case ESE_DZ_DRV_WAKE_UP_EV:
+ break;
+ case ESE_DZ_DRV_START_UP_EV:
+ /* event queue init complete. ok. */
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, subcode,
+ EFX_QWORD_VAL(*event));
+ }
+}
+
+static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
+
+ switch (subcode) {
+ case EFX_EF10_TEST:
+ channel->event_test_cpu = raw_smp_processor_id();
+ break;
+ case EFX_EF10_REFILL:
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here.
+ */
+ efx_fast_push_rx_descriptors(&channel->rx_queue);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %u"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, (unsigned) subcode,
+ EFX_QWORD_VAL(*event));
+ }
+}
+
+static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event, *p_event;
+ unsigned int read_ptr;
+ int ev_code;
+ int tx_descs = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ break;
+
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
+
+ netif_vdbg(efx, drv, efx->net_dev,
+ "processing event on %d " EFX_QWORD_FMT "\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ switch (ev_code) {
+ case ESE_DZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case ESE_DZ_EV_CODE_RX_EV:
+ spent += efx_ef10_handle_rx_event(channel, &event);
+ if (spent >= quota) {
+ /* XXX can we split a merged event to
+ * avoid going over-quota?
+ */
+ spent = quota;
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_TX_EV:
+ tx_descs += efx_ef10_handle_tx_event(channel, &event);
+ if (tx_descs > efx->txq_entries) {
+ spent = quota;
+ goto out;
+ } else if (++spent == quota) {
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_DRIVER_EV:
+ efx_ef10_handle_driver_event(channel, &event);
+ if (++spent == quota)
+ goto out;
+ break;
+ case EFX_EF10_DRVGEN_EV:
+ efx_ef10_handle_driver_generated_event(channel, &event);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, ev_code,
+ EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
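+/* Write back the event queue read pointer.  Under workaround 35388 the
+ * indirect RPTR field is only ERF_DD_EVQ_IND_RPTR_WIDTH bits wide, so
+ * the pointer is posted in two halves: high bits first, then low bits.
+ */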
+static void efx_ef10_ev_read_ack(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_dword_t rptr;
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (channel->eventq_read_ptr &
+ channel->eventq_mask) >>
+ ERF_DD_EVQ_IND_RPTR_WIDTH);
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ channel->eventq_read_ptr &
+ ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
+ channel->eventq_read_ptr &
+ channel->eventq_mask);
+ efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
+ }
+}
+
+static void efx_ef10_ev_test_generate(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event;
+ int rc;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_TEST);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+void efx_ef10_handle_drain_event(struct efx_nic *efx)
+{
+ if (atomic_dec_and_test(&efx->active_queues))
+ wake_up(&efx->flush_wq);
+
+ WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+static int efx_ef10_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int pending;
+
+ /* If the MC has just rebooted, the TX/RX queues will have already been
+ * torn down, but efx->active_queues needs to be set to zero.
+ */
+ if (nic_data->must_realloc_vis) {
+ atomic_set(&efx->active_queues, 0);
+ return 0;
+ }
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_ef10_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_ef10_tx_fini(tx_queue);
+ }
+
+ wait_event_timeout(efx->flush_wq,
+ atomic_read(&efx->active_queues) == 0,
+ msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
+ pending = atomic_read(&efx->active_queues);
+ if (pending) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
+ pending);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+ /* XXX should we randomise the initval? */
+}
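+
+/* Both efx_ef10_filter_equal() and efx_ef10_filter_hash() treat all
+ * fields of efx_filter_spec from outer_vid to the end of the structure
+ * as the filter's identity, so they must be kept in sync with the
+ * layout of struct efx_filter_spec.
+ */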
+
+/* Decide whether a filter should be exclusive or should instead allow
+ * delivery to additional recipients.  Currently we treat filters for
+ * specific local unicast MAC and IP addresses as exclusive.
+ */
+static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
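
The multicast tests above rely on the addresses being in network byte order: for IPv4, ipv4_is_multicast() checks for 224.0.0.0/4, and for IPv6 a first byte of 0xff marks ff00::/8. Illustrative values:

	__be32 v4 = htonl(0xe0000001);		/* 224.0.0.1 */
	bool v4_mc = ipv4_is_multicast(v4);	/* true */

	const u8 v6[16] = { 0xff, 0x02 };	/* ff02::... */
	bool v6_mc = (v6[0] == 0xff);		/* true */
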
+
+static struct efx_filter_spec *
+efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
+
+static void
+efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
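
The three accessors above implement low-bit pointer tagging: kmalloc()ed specs are at least word-aligned, so the bottom bits of the pointer are free to carry the private EFX_EF10_FILTER_FLAGS. A generic sketch of the technique (names illustrative):

	#define EX_TAG_MASK	3UL	/* low bits free due to alignment */

	static unsigned long ex_pack(void *ptr, unsigned long flags)
	{
		return (unsigned long)ptr | (flags & EX_TAG_MASK);
	}

	static void *ex_ptr(unsigned long word)
	{
		return (void *)(word & ~EX_TAG_MASK);
	}

	static unsigned long ex_flags(unsigned long word)
	{
		return word & EX_TAG_MASK;
	}
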
+
+static void efx_ef10_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ u32 match_fields = 0;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /* Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(spec->gen_field)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &spec->gen_field, sizeof(spec->gen_field)); \
+ }
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
+ spec->rss_context !=
+ EFX_FILTER_RSS_CONTEXT_DEFAULT ?
+ spec->rss_context : nic_data->rx_rss_context);
+}
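
For reference, expanding one of the COPY_FIELD() invocations above, COPY_FIELD(LOC_PORT, loc_port, DST_PORT), gives:

	if (spec->match_flags & EFX_FILTER_MATCH_LOC_PORT) {
		match_fields |=
			1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN;
		BUILD_BUG_ON(MC_CMD_FILTER_OP_IN_DST_PORT_LEN <
			     sizeof(spec->loc_port));
		memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_PORT),
		       &spec->loc_port, sizeof(spec->loc_port));
	}
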
+
+static int efx_ef10_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ u64 *handle, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ int rc;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ return rc;
+}
+
+static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_flags[match_pri] == match_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
+
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_filter_spec *saved_spec;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool replacing = false;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX)
+ return -EINVAL;
+
+ rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ if (rc < 0)
+ return rc;
+ match_pri = rc;
+
+ hash = efx_ef10_filter_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ unsigned int depth = 1;
+ unsigned int i;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec &
+ EFX_EF10_FILTER_FLAG_BUSY)
+ break;
+ if (spec->priority < saved_spec->priority &&
+ !(saved_spec->priority ==
+ EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+				/* The only possible match for an
+				 * exclusive filter
+				 */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ goto found;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+
+			/* Once we reach the maximum search depth, use
+			 * the first suitable slot, or return -EBUSY if
+			 * there is none.
+			 */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+ goto found;
+ }
+
+ ++depth;
+ }
+
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+
+found:
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
+
+ /* Mark lower-priority multicast recipients busy prior to removal */
+ if (is_mc_recip) {
+ unsigned int depth, i;
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ if (test_bit(depth, mc_rem_map))
+ table->entry[i].spec |=
+ EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
+ replacing);
+
+ /* Finalise the software table entry */
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+ priv_flags = efx_ef10_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ spin_unlock_bh(&efx->filter_lock);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->filter_lock);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ } else {
+ priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ efx_ef10_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
+ if (rc == 0)
+ rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here on EF10 */
+}
+
+/* Remove a filter.
+ * If !stack_requested, remove by ID; if stack_requested, remove by
+ * index.  Filter IDs may come from userland and must be range-checked.
+ */
+static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, bool stack_requested)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ /* Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ */
+ for (;;) {
+ spin_lock_bh(&efx->filter_lock);
+ if (!(table->entry[filter_idx].spec &
+ EFX_EF10_FILTER_FLAG_BUSY))
+ break;
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec || spec->priority > priority ||
+ (!stack_requested &&
+ efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ filter_id / HUNT_FILTER_TBL_ROWS)) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
+ /* Reset steering of a stack-owned filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK);
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
+ rc = efx_ef10_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ true);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
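
The wait loop at the top of this function is a standard spinlock-plus-waitqueue pattern: drop the lock before sleeping, then re-take it and re-check. A sketch with illustrative names:

	static spinlock_t ex_lock;
	static wait_queue_head_t ex_waitq;
	static struct { unsigned long flags; } ex_entry;
	#define EX_BUSY 1UL

	/* Returns with ex_lock held and the entry not busy */
	static void ex_lock_when_idle(void)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			spin_lock_bh(&ex_lock);
			if (!(ex_entry.flags & EX_BUSY))
				break;
			prepare_to_wait(&ex_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_bh(&ex_lock);
			schedule();
		}
		finish_wait(&ex_waitq, &wait);
	}
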
+
+static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+}
+
+static int efx_ef10_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ const struct efx_filter_spec *saved_spec;
+ int rc;
+
+ spin_lock_bh(&efx->filter_lock);
+ saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ filter_id / HUNT_FILTER_TBL_ROWS) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ /* TODO */
+}
+
+static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
+}
+
+static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] = (efx_ef10_filter_rx_match_pri(
+ table, spec->match_flags) *
+ HUNT_FILTER_TBL_ROWS +
+ filter_idx);
+ }
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
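
The IDs returned here encode two values in one u32: the RX match priority in the high part and the table row in the low part, which the remove/get paths later split apart with / and %. Sketch:

	static u32 ex_encode_id(unsigned int match_pri, unsigned int row)
	{
		return match_pri * HUNT_FILTER_TBL_ROWS + row;
	}

	static unsigned int ex_id_to_row(u32 id)
	{
		return id % HUNT_FILTER_TBL_ROWS;
	}

	static unsigned int ex_id_to_pri(u32 id)
	{
		return id / HUNT_FILTER_TBL_ROWS;
	}
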
+
+#ifdef CONFIG_RFS_ACCEL
+
+static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
+
+static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *saved_spec;
+ unsigned int hash, i, depth = 1;
+ bool replacing = false;
+ int ins_index = -1;
+ u64 cookie;
+ s32 rc;
+
+ /* Must be an RX filter without RSS and not for a multicast
+ * destination address (RFS only works for connected sockets).
+ * These restrictions allow us to pass only a tiny amount of
+ * data through to the completion function.
+ */
+ EFX_WARN_ON_PARANOID(spec->flags !=
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
+ EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
+ EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
+
+ hash = efx_ef10_filter_hash(spec);
+
+ spin_lock_bh(&efx->filter_lock);
+
+ /* Find any existing filter with the same match tuple or else
+ * a free slot to insert at. If an existing filter is busy,
+ * we have to give up.
+ */
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ EFX_WARN_ON_PARANOID(saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK);
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto fail_unlock;
+ }
+ ins_index = i;
+ break;
+ }
+
+		/* Once we reach the maximum search depth, use the
+		 * first suitable slot, or return -EBUSY if there is
+		 * none.
+		 */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ break;
+ }
+
+ ++depth;
+ }
+
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ replacing = true;
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto fail_unlock;
+ }
+ *saved_spec = *spec;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ EFX_EF10_FILTER_FLAG_BUSY);
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Pack up the variables needed on completion */
+ cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf,
+ table->entry[ins_index].handle, replacing);
+ efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ MC_CMD_FILTER_OP_OUT_LEN,
+ efx_ef10_filter_rfs_insert_complete, cookie);
+
+ return ins_index;
+
+fail_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int ins_index, dmaq_id;
+ struct efx_filter_spec *spec;
+ bool replacing;
+
+ /* Unpack the cookie */
+ replacing = cookie >> 31;
+ ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
+ dmaq_id = cookie & 0xffff;
+
+ spin_lock_bh(&efx->filter_lock);
+ spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (rc == 0) {
+ table->entry[ins_index].handle =
+ MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (replacing)
+ spec->dmaq_id = dmaq_id;
+ } else if (!replacing) {
+ kfree(spec);
+ spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, spec, 0);
+ spin_unlock_bh(&efx->filter_lock);
+
+ wake_up_all(&table->waitq);
+}
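
The cookie shared by the two functions above packs three values into one word: bit 31 = replacing, bits 30:16 = insertion index, bits 15:0 = RX queue. An illustrative pack/unpack pair (not driver API):

	static unsigned long ex_pack_cookie(bool replacing,
					    unsigned int ins_index,
					    unsigned int dmaq_id)
	{
		return (unsigned long)replacing << 31 |
		       ins_index << 16 | dmaq_id;
	}

	static void ex_unpack_cookie(unsigned long cookie, bool *replacing,
				     unsigned int *ins_index,
				     unsigned int *dmaq_id)
	{
		*replacing = cookie >> 31;
		*ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
		*dmaq_id = cookie & 0xffff;
	}
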
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual);
+
+static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+
+ if (!spec ||
+ (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
+ spec->priority != EFX_FILTER_PRI_HINT ||
+ !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
+ flow_id, filter_idx))
+ return false;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
+ efx_ef10_filter_rfs_expire_complete, filter_idx))
+ return false;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ return true;
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ }
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
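
MAP_FLAG() detects a supported MCDI match bit by clearing it and checking whether the value changed, so any bit left over at the end is unsupported. Expanding MAP_FLAG(IP_PROTO, IP_PROTO) above gives:

	{
		u32 old_mcdi_flags = mcdi_flags;
		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN);
		if (mcdi_flags != old_mcdi_flags)
			match_flags |= EFX_FILTER_MATCH_IP_PROTO;
	}
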
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ struct efx_ef10_filter_table *table;
+ size_t outlen;
+ int rc;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+ table->rx_match_count = 0;
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_flags[table->rx_match_count++] = rc;
+ }
+ }
+
+ table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ efx->filter_state = table;
+ init_waitqueue_head(&table->waitq);
+ return 0;
+
+fail:
+ kfree(table);
+ return rc;
+}
+
+static void efx_ef10_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ bool failed = false;
+ int rc;
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ false);
+ if (rc)
+ failed = true;
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ table->entry[filter_idx].spec &=
+ ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore all filters\n");
+ else
+ nic_data->must_restore_filters = false;
+}
+
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ WARN_ON(rc != 0);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_filter_spec spec;
+ bool remove_failed = false;
+ struct netdev_hw_addr *uc;
+ struct netdev_hw_addr *mc;
+ unsigned int filter_idx;
+ int i, n, rc;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ /* Mark old filters that may need to be removed */
+ spin_lock_bh(&efx->filter_lock);
+ n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ if (net_dev->flags & IFF_PROMISC ||
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
+ table->stack_uc_count = -1;
+ } else {
+ table->stack_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ ETH_ALEN);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ memcpy(table->stack_uc_list[i].addr,
+ uc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
+ table->stack_mc_count = -1;
+ } else {
+ table->stack_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->stack_mc_list[0].addr);
+ i = 1;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ memcpy(table->stack_mc_list[i].addr,
+ mc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (table->stack_uc_count >= 0) {
+ for (i = 0; i < table->stack_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_uc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to unicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_uc_list[i].id);
+ table->stack_uc_count = -1;
+ break;
+ }
+ table->stack_uc_list[i].id = rc;
+ }
+ }
+ if (table->stack_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_uc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_uc_count = 0;
+ } else {
+ table->stack_uc_list[0].id = rc;
+ }
+ }
+
+ /* Insert/renew multicast filters */
+ if (table->stack_mc_count >= 0) {
+ for (i = 0; i < table->stack_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_mc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to multicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_mc_list[i].id);
+ table->stack_mc_count = -1;
+ break;
+ }
+ table->stack_mc_list[i].id = rc;
+ }
+ }
+ if (table->stack_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_mc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_mc_count = 0;
+ } else {
+ table->stack_mc_list[0].id = rc;
+ }
+ }
+
+	/* Remove filters that weren't renewed.  Since nothing else
+	 * changes the STACK_OLD flag or removes these filters, we
+	 * don't need to hold the filter_lock while scanning for them.
+	 */
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ if (ACCESS_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_STACK_OLD) {
+ if (efx_ef10_filter_remove_internal(efx,
+ EFX_FILTER_PRI_REQUIRED,
+ i, true) < 0)
+ remove_failed = true;
+ }
+ }
+ WARN_ON(remove_failed);
+}
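
Overall this function is a mark/renew/sweep cycle: mark every stack-owned filter old, re-insert everything currently wanted (which clears the mark), then sweep whatever stayed marked. A distilled sketch with illustrative types:

	#define EX_OLD	1u

	struct ex_filter { unsigned int flags; bool wanted; };

	static void ex_sync(struct ex_filter *tbl, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)		/* mark */
			tbl[i].flags |= EX_OLD;
		for (i = 0; i < n; i++)		/* renew */
			if (tbl[i].wanted)
				tbl[i].flags &= ~EX_OLD;
		for (i = 0; i < n; i++)		/* sweep */
			if (tbl[i].flags & EX_OLD)
				tbl[i].flags = 0;	/* "remove" */
	}
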
+
+static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return efx_mcdi_set_mac(efx);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+struct efx_ef10_nvram_type_info {
+ u16 type, type_mask;
+ u8 port;
+ const char *name;
+};
+
+static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
+ { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
+ { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+};
+
+static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
+ const struct efx_ef10_nvram_type_info *info;
+ size_t size, erase_size, outlen;
+ bool protected;
+ int rc;
+
+ for (info = efx_ef10_nvram_types; ; info++) {
+ if (info ==
+ efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+ return -ENODEV;
+ if ((type & ~info->type_mask) == info->type)
+ break;
+ }
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
+ part->fw_subtype = MCDI_DWORD(outbuf,
+ NVRAM_METADATA_OUT_SUBTYPE);
+
+ part->common.dev_type_name = "EF10 NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
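
The type match in the lookup loop uses a mask so one table entry can cover a range: with { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, ... }, any type that equals PHY_MIN outside its low byte matches. Sketch:

	static bool ex_type_matches(unsigned int type, u16 entry_type,
				    u16 entry_mask)
	{
		/* entry_mask selects "don't care" bits of the type */
		return (type & ~entry_mask) == entry_type;
	}
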
+
+static int efx_ef10_mtd_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+ struct efx_mcdi_mtd_partition *parts;
+ size_t outlen, n_parts_total, i, n_parts;
+ unsigned int type;
+ int rc;
+
+ ASSERT_RTNL();
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
+ return -EIO;
+
+ n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+ if (n_parts_total >
+ MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
+ return -EIO;
+
+ parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ n_parts = 0;
+ for (i = 0; i < n_parts_total; i++) {
+ type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
+ i);
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
+}
+
+const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ /* TODO: test_chip */
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+};
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 000000000000..b3f4e3755fd9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,415 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF10_REGS_H
+#define EFX_EF10_REGS_H
+
+/* EF10 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ * MMIO register Host memory structure
+ * -------------------------------------------------------------
+ * Address R
+ * Bitfield RF SF
+ * Enumerator FE SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * D: Huntington A0
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
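
As a worked decoding of the convention: ERF_DZ_EVQ_RPTR below is a bitfield (RF) of an MMIO register, valid from Huntington A0 (D) through the latest revision (Z). A hedged usage sketch with the driver's bitfield helpers, mirroring how ef10.c writes it:

	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, read_ptr & mask);
	/* then written with efx_writed_page(efx, &reg, ER_DZ_EVQ_RPTR,
	 * channel number)
	 */
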
+
+/**************************************************************************
+ *
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* BIU_HW_REV_ID_REG: */
+#define ER_DZ_BIU_HW_REV_ID 0x00000000
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+/* BIU_MC_SFT_STATUS_REG: */
+#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
+#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+/* BIU_INT_ISR_REG: */
+#define ER_DZ_BIU_INT_ISR 0x00000090
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+/* MC_DB_LWRD_REG: */
+#define ER_DZ_MC_DB_LWRD 0x00000200
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+/* MC_DB_HWRD_REG: */
+#define ER_DZ_MC_DB_HWRD 0x00000204
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+/* EVQ_RPTR_REG: */
+#define ER_DZ_EVQ_RPTR 0x00000400
+#define ER_DZ_EVQ_RPTR_STEP 8192
+#define ER_DZ_EVQ_RPTR_ROWS 2048
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+/* EVQ_TMR_REG: */
+#define ER_DZ_EVQ_TMR 0x00000420
+#define ER_DZ_EVQ_TMR_STEP 8192
+#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+/* RX_DESC_UPD_REG: */
+#define ER_DZ_RX_DESC_UPD 0x00000830
+#define ER_DZ_RX_DESC_UPD_STEP 8192
+#define ER_DZ_RX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/* TX_DESC_UPD_REG: */
+#define ER_DZ_TX_DESC_UPD 0x00000a10
+#define ER_DZ_TX_DESC_UPD_STEP 8192
+#define ER_DZ_TX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+/* EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+/* MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+/* RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_RX_EV_RSVD2_LBN 54
+#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DZ_RX_EV_SOFT1_LBN 32
+#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
+#define ESF_DZ_RX_EV_RSVD1_LBN 31
+#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_DZ_RX_ABORT_LBN 30
+#define ESF_DZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+/* RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+/* RX_USER_DESC */
+#define ESF_DZ_RX_USR_RESERVED_LBN 62
+#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
+#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+
+/* TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+/* TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_TX_EV_RSVD_LBN 48
+#define ESF_DZ_TX_EV_RSVD_WIDTH 10
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DZ_TX_CAN_MERGE_LBN 31
+#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_DZ_TX_SOFT1_LBN 24
+#define ESF_DZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+/* TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+/* TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+/* TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+/* TX_USER_DESC */
+#define ESF_DZ_TX_USR_TYPE_LBN 63
+#define ESF_DZ_TX_USR_TYPE_WIDTH 1
+#define ESF_DZ_TX_USR_CONT_LBN 62
+#define ESF_DZ_TX_USR_CONT_WIDTH 1
+#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+/*************************************************************************/
+
+/* TX_DESC_UPD_REG: Transmit descriptor update register.
+ * We may write just one dword of these registers.
+ */
+#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
+
+/* The workaround for bug 35388 requires multiplexing writes through
+ * the TX_DESC_UPD_DWORD address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
+
+/* TX_PIOBUF
+ * PIO buffer aperture (paged)
+ */
+#define ER_DZ_TX_PIOBUF 4096
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+
+/* RX packet prefix */
+#define ES_DZ_RX_PREFIX_HASH_OFST 0
+#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
+#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
+#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
+#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
+#define ES_DZ_RX_PREFIX_SIZE 14
+
+#endif /* EFX_EF10_REGS_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c72968840f1a..07c9bc4c61bc 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
-#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
@@ -81,8 +80,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
@@ -191,8 +189,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static int efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +246,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- if (rx_queue->enabled)
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue);
}
return spent;
}
-/* Mark channel as finished processing
- *
- * Note that since we will not receive further interrupts for this
- * channel before we finish processing and call the eventq_read_ack()
- * method, there is no need to use the interrupt hold-off timers.
- */
-static inline void efx_channel_processed(struct efx_channel *channel)
-{
- /* The interrupt handler for this channel may set work_pending
- * as soon as we acknowledge the events we've seen. Make sure
- * it's cleared before then. */
- channel->work_pending = false;
- smp_wmb();
-
- efx_nic_eventq_read_ack(channel);
-}
-
/* NAPI poll handler
*
* NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +296,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
- * since efx_channel_processed() will have no effect if
+ * since efx_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
napi_complete(napi);
- efx_channel_processed(channel);
+ efx_nic_eventq_read_ack(channel);
}
return spent;
}
-/* Process the eventq of the specified channel immediately on this CPU
- *
- * Disable hardware generated interrupts, wait for any existing
- * processing to finish, then directly poll (and ack ) the eventq.
- * Finally reenable NAPI and interrupts.
- *
- * This is for use only during a loopback self-test. It must not
- * deliver any packets up the stack as this can result in deadlock.
- */
-void efx_process_channel_now(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- BUG_ON(channel->channel >= efx->n_channels);
- BUG_ON(!channel->enabled);
- BUG_ON(!efx->loopback_selftest);
-
- /* Disable interrupts and wait for ISRs to complete */
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
- synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- /* Wait for any NAPI processing to complete */
- napi_disable(&channel->napi_str);
-
- /* Poll the channel */
- efx_process_channel(channel, channel->eventq_mask + 1);
-
- /* Ack the eventq. This may cause an interrupt to be generated
- * when they are reenabled */
- efx_channel_processed(channel);
-
- napi_enable(&channel->napi_str);
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
-}
-
/* Create event queue
* Event queue memory allocations are done only once. If the channel
* is reset, the memory buffer will be reused; this guards against
@@ -391,14 +329,23 @@ static int efx_probe_eventq(struct efx_channel *channel)
}
/* Prepare channel's event queue */
-static void efx_init_eventq(struct efx_channel *channel)
+static int efx_init_eventq(struct efx_channel *channel)
{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d init event queue\n", channel->channel);
+ struct efx_nic *efx = channel->efx;
+ int rc;
- channel->eventq_read_ptr = 0;
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
- efx_nic_init_eventq(channel);
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
}
/* Enable event queue processing and NAPI */
@@ -407,11 +354,7 @@ static void efx_start_eventq(struct efx_channel *channel)
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel);
- /* The interrupt handler for this channel may set work_pending
- * as soon as we enable it. Make sure it's cleared before
- * then. Similarly, make sure it sees the enabled flag set.
- */
- channel->work_pending = false;
+ /* Make sure the NAPI handler sees the enabled flag set */
channel->enabled = true;
smp_wmb();
@@ -431,10 +374,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
+ if (!channel->eventq_init)
+ return;
+
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
}
static void efx_remove_eventq(struct efx_channel *channel)
@@ -583,8 +530,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
channel->type->get_name(channel,
- efx->channel_name[channel->channel],
- sizeof(efx->channel_name[0]));
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
}
static int efx_probe_channels(struct efx_nic *efx)
@@ -634,13 +581,13 @@ static void efx_start_datapath(struct efx_nic *efx)
* support the current MTU, including padding for header
* alignment and overruns.
*/
- efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+ efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
NET_IP_ALIGN + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = false;
+ efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
} else if (efx->type->can_rx_scatter) {
BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
@@ -668,9 +615,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
- /* RX filters also have scatter-enabled flags */
+ /* RX filters may also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
- efx_filter_update_rx_scatter(efx);
+ efx->type->filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
@@ -684,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx)
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
efx_nic_generate_fill_event(rx_queue);
}
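
[Editor's note] Each TX and RX queue initialised above now bumps efx->active_queues. Only the increments appear in this hunk; a plausible counterpart is a flush-completion handler that drops the count so the teardown path can wait for it to reach zero. The function name is hypothetical and efx->flush_wq is assumed to be a wait queue head:

/* Hypothetical sketch, not part of this patch */
static void example_handle_flush_done(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);	/* assumed wait queue head */
}
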
@@ -704,30 +654,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- struct pci_dev *dev = efx->pci_dev;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
- /* Only perform flush if dma is enabled */
- if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
- rc = efx_nic_flush_queues(efx);
-
- if (rc && EFX_WORKAROUND_7803(efx)) {
- /* Schedule a reset to recover from the flush failure. The
- * descriptor caches reference memory we're about to free,
- * but falcon_reconfigure_mac_wrapper() won't reconnect
- * the MACs because of the pending reset. */
- netif_err(efx, drv, efx->net_dev,
- "Resetting to recover from flush failure\n");
- efx_schedule_reset(efx, RESET_TYPE_ALL);
- } else if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
}
efx_for_each_channel(channel, efx) {
@@ -741,7 +676,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_stop_eventq(channel);
efx_start_eventq(channel);
}
+ }
+
+ rc = efx->type->fini_dmaq(efx);
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+ efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -779,7 +733,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0;
- int rc;
+ int rc, rc2;
rc = efx_check_disabled(efx);
if (rc)
@@ -809,7 +763,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, true);
+ efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */
memset(other_channel, 0, sizeof(other_channel));
@@ -859,9 +813,16 @@ out:
}
}
- efx_start_interrupts(efx, true);
- efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ }
return rc;
rollback:
@@ -931,10 +892,9 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (link_state->up)
netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ "link up at %uMbps %s-duplex (MTU %d)\n",
link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu,
- (efx->promiscuous ? " [PROMISC]" : ""));
+ efx->net_dev->mtu);
else
netif_info(efx, link, efx->net_dev, "link down\n");
}
@@ -983,10 +943,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
WARN_ON(!mutex_is_locked(&efx->mac_lock));
- /* Serialise the promiscuous flag with efx_set_rx_mode. */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))
@@ -1144,6 +1100,7 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
+ unsigned int mem_map_size = efx->type->mem_map_size(efx);
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1196,20 +1153,18 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size);
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size, efx->membase);
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1288,8 +1243,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
- unsigned int max_channels =
- min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
unsigned int extra_channels = 0;
unsigned int i, j;
int rc;
@@ -1306,7 +1259,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
- n_channels = min(n_channels, max_channels);
+ n_channels = min(n_channels, efx->max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
@@ -1392,31 +1345,42 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
- struct efx_channel *channel;
+ struct efx_channel *channel, *end_channel;
+ int rc;
BUG_ON(efx->state == STATE_DISABLED);
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
+ efx->irq_soft_enabled = true;
+ smp_wmb();
efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq || !may_keep_eventq)
- efx_init_eventq(channel);
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
efx_start_eventq(channel);
}
efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
}
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1425,20 +1389,79 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
efx_mcdi_mode_poll(efx);
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
efx_stop_eventq(channel);
- if (!channel->type->keep_eventq || !may_keep_eventq)
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+static int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
efx_fini_eventq(channel);
}
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+static void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
}
static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1495,9 +1518,11 @@ static int efx_probe_nic(struct efx_nic *efx)
* in MSI-X interrupts. */
rc = efx_probe_interrupts(efx);
if (rc)
- goto fail;
+ goto fail1;
- efx->type->dimension_resources(efx);
+ rc = efx->type->dimension_resources(efx);
+ if (rc)
+ goto fail2;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1515,7 +1540,9 @@ static int efx_probe_nic(struct efx_nic *efx)
return 0;
-fail:
+fail2:
+ efx_remove_interrupts(efx);
+fail1:
efx->type->remove(efx);
return rc;
}
@@ -1528,6 +1555,44 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
+static int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ spin_lock_init(&efx->filter_lock);
+
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*efx->rps_flow_id),
+ GFP_KERNEL);
+ if (!efx->rps_flow_id) {
+ efx->type->filter_table_remove(efx);
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_flow_id);
+#endif
+ efx->type->filter_table_remove(efx);
+}
+
+static void efx_restore_filters(struct efx_nic *efx)
+{
+ efx->type->filter_table_restore(efx);
+}
+
/**************************************************************************
*
* NIC startup/shutdown
@@ -1917,34 +1982,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
-
- efx->type->update_stats(efx);
-
- stats->rx_packets = mac_stats->rx_packets;
- stats->tx_packets = mac_stats->tx_packets;
- stats->rx_bytes = mac_stats->rx_bytes;
- stats->tx_bytes = mac_stats->tx_bytes;
- stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
- stats->multicast = mac_stats->rx_multicast;
- stats->collisions = mac_stats->tx_collision;
- stats->rx_length_errors = (mac_stats->rx_gtjumbo +
- mac_stats->rx_length_error);
- stats->rx_crc_errors = mac_stats->rx_bad;
- stats->rx_frame_errors = mac_stats->rx_align_error;
- stats->rx_fifo_errors = mac_stats->rx_overflow;
- stats->rx_missed_errors = mac_stats->rx_missed;
- stats->tx_window_errors = mac_stats->tx_late_collision;
-
- stats->rx_errors = (stats->rx_length_errors +
- stats->rx_crc_errors +
- stats->rx_frame_errors +
- mac_stats->rx_symbol_error);
- stats->tx_errors = (stats->tx_window_errors +
- mac_stats->tx_bad);
-
+ efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
return stats;
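
[Editor's note] The per-type update_stats hook now takes two optional destinations; here it fills only the rtnl structure, while the ethtool path later in this patch passes only a u64 array. The prototype below is a reconstruction from those two call sites, not a quote of the header:

/* size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
 *			   struct rtnl_link_stats64 *core_stats);
 *
 * full_stats:  u64 array for the detailed ethtool stats, or NULL
 * core_stats:  standard netdev counters, or NULL
 * returns:     number of entries written to full_stats
 */
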
@@ -2018,30 +2058,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct netdev_hw_addr *ha;
- union efx_multicast_hash *mc_hash = &efx->multicast_hash;
- u32 crc;
- int bit;
-
- efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
-
- /* Build multicast hash table */
- if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
- memset(mc_hash, 0xff, sizeof(*mc_hash));
- } else {
- memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(ha, net_dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
- __set_bit_le(bit, mc_hash);
- }
-
- /* Broadcast packets go through the multicast hash filter.
- * ether_crc_le() of the broadcast address is 0xbe2612ff
- * so we always add bit 0xff to the mask.
- */
- __set_bit_le(0xff, mc_hash);
- }
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -2059,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_netdev_ops = {
+static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2086,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
};
+static const struct net_device_ops efx_ef10_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_rx_mode,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2098,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (net_dev->netdev_ops == &efx_netdev_ops &&
+ if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
+ net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2125,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->priv_flags |= IFF_UNICAST_FLT;
+ } else {
+ net_dev->netdev_ops = &efx_farch_netdev_ops;
+ }
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2185,22 +2227,11 @@ fail_locked:
static void efx_unregister_netdev(struct efx_nic *efx)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
- /* Free up any skbs still remaining. This has to happen before
- * we try to unregister the netdev as running their destructors
- * may be needed to get the device ref. count to 0. */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_release_tx_buffers(tx_queue);
- }
-
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2223,7 +2254,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2260,9 +2291,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
"could not restore PHY settings\n");
}
- efx->type->reconfigure_mac(efx);
-
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
efx_restore_filters(efx);
efx_sriov_reset(efx);
@@ -2458,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
};
@@ -2516,6 +2549,9 @@ static int efx_init_struct(struct efx_nic *efx,
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2527,10 +2563,10 @@ static int efx_init_struct(struct efx_nic *efx,
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
}
- EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
-
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
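
[Editor's note] efx->msi_context[i], initialised just above, gives each MSI-X vector a back-pointer to the NIC and its channel index (the name field is filled in by efx_set_channel_names() earlier in this patch). A hedged sketch of the handler shape this enables, combining it with the irq_soft_enabled check; the function name is hypothetical and the real handlers live in the per-architecture NIC code:

static irqreturn_t example_msi_handler(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	/* Discard the event while interrupts are soft-disabled; the IRQ
	 * itself stays enabled, avoiding enable/disable races. */
	if (!ACCESS_ONCE(efx->irq_soft_enabled))
		return IRQ_HANDLED;

	efx_schedule_channel(efx_get_channel(efx, context->index));
	return IRQ_HANDLED;
}
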
@@ -2579,7 +2615,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -2601,7 +2637,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
dev_close(efx->net_dev);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
rtnl_unlock();
efx_sriov_fini(efx);
@@ -2703,10 +2739,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail6;
return 0;
+ fail6:
+ efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
fail4:
@@ -2824,7 +2864,7 @@ static int efx_pm_freeze(struct device *dev)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
}
rtnl_unlock();
@@ -2834,12 +2874,15 @@ static int efx_pm_freeze(struct device *dev)
static int efx_pm_thaw(struct device *dev)
{
+ int rc;
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
@@ -2860,6 +2903,11 @@ static int efx_pm_thaw(struct device *dev)
queue_work(reset_workqueue, &efx->reset_work);
return 0;
+
+fail:
+ rtnl_unlock();
+
+ return rc;
}
static int efx_pm_poweroff(struct device *dev)
@@ -2896,8 +2944,8 @@ static int efx_pm_resume(struct device *dev)
rc = efx->type->init(efx);
if (rc)
return rc;
- efx_pm_thaw(dev);
- return 0;
+ rc = efx_pm_thaw(dev);
+ return rc;
}
static int efx_pm_suspend(struct device *dev)
@@ -2942,7 +2990,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index bdb30bbb0c97..34d00f5771fe 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
@@ -69,27 +68,99 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
-extern int efx_probe_filters(struct efx_nic *efx);
-extern void efx_restore_filters(struct efx_nic *efx);
-extern void efx_remove_filters(struct efx_nic *efx);
-extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_filter_insert_filter(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace);
-extern int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-extern int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *);
-extern void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size);
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ * is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ * support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ return efx->type->filter_insert(efx, spec, replace_equal);
+}
+
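[Editor's note] A usage sketch: steering IPv4 TCP traffic for local port 80 to RX queue 1 through the new ops-based wrapper. efx_filter_init_rx() and the match fields are the ones used by the ethtool code later in this patch; the queue number is arbitrary and error handling is elided:

	struct efx_filter_spec spec;
	s32 filter_id;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 1 /* RX queue */);
	spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
			    EFX_FILTER_MATCH_IP_PROTO |
			    EFX_FILTER_MATCH_LOC_PORT);
	spec.ether_type = htons(ETH_P_IP);
	spec.ip_proto = IPPROTO_TCP;
	spec.loc_port = htons(80);

	filter_id = efx_filter_insert_filter(efx, &spec, false);
	/* >= 0: filter ID for later removal; < 0: negative errno */
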
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+static inline void efx_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_clear_rx(efx, priority);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
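
[Editor's note] The three wrappers above compose into a scan-and-remove loop. A sketch, assuming efx_filter_get_rx_ids() writes at most size IDs and returns the count written (or a negative error), which matches how the ethtool code uses it:

static int example_remove_manual_filters(struct efx_nic *efx)
{
	u32 limit = efx_filter_get_rx_id_limit(efx);
	u32 *ids = kcalloc(limit, sizeof(*ids), GFP_KERNEL);
	s32 count, i;

	if (!ids)
		return -ENOMEM;
	count = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, ids, limit);
	for (i = 0; i < count; i++)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, ids[i]);
	kfree(ids);
	return count < 0 ? count : 0;
}
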
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
@@ -105,11 +176,11 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
+extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -141,7 +212,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
-extern int efx_mtd_probe(struct efx_nic *efx);
+extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
+static inline int efx_mtd_probe(struct efx_nic *efx)
+{
+ return efx->type->mtd_probe(efx);
+}
extern void efx_mtd_rename(struct efx_nic *efx);
extern void efx_mtd_remove(struct efx_nic *efx);
#else
@@ -155,7 +231,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
- channel->work_pending = true;
napi_schedule(&channel->napi_str);
}
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index ab8fb5889e55..7fdfee019092 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
- * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
- * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
+ * @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
*/
@@ -163,8 +162,7 @@ enum reset_type {
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
- RESET_TYPE_RX_DESC_FETCH,
- RESET_TYPE_TX_DESC_FETCH,
+ RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
RESET_TYPE_MAX,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1fc21458413d..5b471cf5c323 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,14 +19,9 @@
#include "filter.h"
#include "nic.h"
-struct ethtool_string {
- char name[ETH_GSTRING_LEN];
-};
-
-struct efx_ethtool_stat {
+struct efx_sw_stat_desc {
const char *name;
enum {
- EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
EFX_ETHTOOL_STAT_SOURCE_channel,
EFX_ETHTOOL_STAT_SOURCE_tx_queue
@@ -35,7 +30,7 @@ struct efx_ethtool_stat {
u64(*get_stat) (void *field); /* Reader function */
};
-/* Initialiser for a struct #efx_ethtool_stat with type-checking */
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
get_stat_function) { \
.name = #stat_name, \
@@ -52,24 +47,11 @@ static u64 efx_get_uint_stat(void *field)
return *(unsigned int *)field;
}
-static u64 efx_get_u64_stat(void *field)
-{
- return *(u64 *) field;
-}
-
static u64 efx_get_atomic_stat(void *field)
{
return atomic_read((atomic_t *) field);
}
-#define EFX_ETHTOOL_U64_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
- u64, efx_get_u64_stat)
-
-#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
- EFX_ETHTOOL_STAT(name, nic, n_##name, \
- unsigned int, efx_get_uint_stat)
-
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
EFX_ETHTOOL_STAT(field, nic, field, \
atomic_t, efx_get_atomic_stat)
@@ -82,72 +64,12 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
unsigned int, efx_get_uint_stat)
-static const struct efx_ethtool_stat efx_ethtool_stats[] = {
- EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(tx_control),
- EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
- EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
- EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(rx_control),
- EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
- EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
- EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
- EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
- EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -155,10 +77,11 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};
-/* Number of ethtool statistics */
-#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
@@ -205,8 +128,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* GMAC does not support 1000Mbps HD */
- ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -291,12 +212,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
*
* Fill in an individual self-test entry.
*/
-static void efx_fill_test(unsigned int test_index,
- struct ethtool_string *strings, u64 *data,
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
int *test, const char *unit_format, int unit_id,
const char *test_format, const char *test_id)
{
- struct ethtool_string unit_str, test_str;
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
/* Fill data value, if applicable */
if (data)
@@ -305,15 +225,14 @@ static void efx_fill_test(unsigned int test_index,
/* Fill string, if applicable */
if (strings) {
if (strchr(unit_format, '%'))
- snprintf(unit_str.name, sizeof(unit_str.name),
+ snprintf(unit_str, sizeof(unit_str),
unit_format, unit_id);
else
- strcpy(unit_str.name, unit_format);
- snprintf(test_str.name, sizeof(test_str.name),
- test_format, test_id);
- snprintf(strings[test_index].name,
- sizeof(strings[test_index].name),
- "%-6s %-24s", unit_str.name, test_str.name);
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
}
}
@@ -336,7 +255,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
enum efx_loopback_mode mode,
unsigned int test_index,
- struct ethtool_string *strings, u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel =
efx_get_channel(efx, efx->tx_channel_offset);
@@ -373,8 +292,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
- struct ethtool_string *strings,
- u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel;
unsigned int n = 0, i;
@@ -433,12 +351,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
static int efx_ethtool_get_sset_count(struct net_device *net_dev,
int string_set)
{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
switch (string_set) {
case ETH_SS_STATS:
- return EFX_ETHTOOL_NUM_STATS;
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT;
case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
- NULL, NULL, NULL);
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
return -EINVAL;
}
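
[Editor's note] Both this function and efx_ethtool_get_strings() below lean on a two-pass convention inferred from the call sites: describe_stats(efx, NULL) only counts, while describe_stats(efx, buf) fills buf with one ETH_GSTRING_LEN-sized name per stat and returns the same count. Sketched:

	size_t n = efx->type->describe_stats(efx, NULL);	/* pass 1: count */
	u8 *names = kcalloc(n, ETH_GSTRING_LEN, GFP_KERNEL);

	if (names)
		efx->type->describe_stats(efx, names);		/* pass 2: fill */
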
@@ -448,20 +368,18 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_string *ethtool_strings =
- (struct ethtool_string *)strings;
int i;
switch (string_set) {
case ETH_SS_STATS:
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strlcpy(ethtool_strings[i].name,
- efx_ethtool_stats[i].name,
- sizeof(ethtool_strings[i].name));
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
break;
case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL,
- ethtool_strings, NULL);
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
default:
/* No other string sets */
@@ -474,27 +392,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
- const struct efx_ethtool_stat *stat;
+ const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
int i;
- EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
-
spin_lock_bh(&efx->stats_lock);
- /* Update MAC and NIC statistics */
- efx->type->update_stats(efx);
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
- /* Fill detailed statistics buffer */
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
- stat = &efx_ethtool_stats[i];
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
- data[i] = stat->get_stat((void *)mac_stats +
- stat->offset);
- break;
case EFX_ETHTOOL_STAT_SOURCE_nic:
data[i] = stat->get_stat((void *)efx + stat->offset);
break;
@@ -709,7 +620,6 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
- bool reset;
int rc = 0;
mutex_lock(&efx->mac_lock);
@@ -732,24 +642,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- /* TX flow control may automatically turn itself off if the
- * link partner (intermittently) stops responding to pause
- * frames. There isn't any indication that this has happened,
- * so the best we do is leave it up to the user to spot this
- * and fix it be cycling transmit flow control on this end. */
- reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
- if (EFX_WORKAROUND_11482(efx) && reset) {
- if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
- /* Recover by resetting the EM block */
- falcon_stop_nic_stats(efx);
- falcon_drain_tx_fifo(efx);
- falcon_reconfigure_xmac(efx);
- falcon_start_nic_stats(efx);
- } else {
- /* Schedule a reset to recover */
- efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
- }
- }
+ /* Hook for Falcon bug 11482 workaround */
+ if (efx->type->prepare_enable_fc_tx &&
+ (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+ efx->type->prepare_enable_fc_tx(efx);
old_adv = efx->link_advertising;
old_fc = efx->wanted_fc;
@@ -814,11 +710,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
-/* MAC address mask including only MC flag */
-static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
@@ -828,8 +725,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
- u16 vid;
- u8 proto;
int rc;
rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -837,44 +732,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
if (rc)
return rc;
- if (spec.dmaq_id == 0xfff)
+ if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
rule->ring_cookie = RX_CLS_FLOW_DISC;
else
rule->ring_cookie = spec.dmaq_id;
- if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
- rule->flow_type = ETHER_FLOW;
- memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
- if (spec.type == EFX_FILTER_MC_DEF)
- memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
- return 0;
- }
-
- rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
- if (rc == 0) {
+ if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V4_FLOW : UDP_V4_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ ip_entry->ip4dst = spec.loc_host[0];
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ ip_entry->ip4src = spec.rem_host[0];
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip_entry->pdst = spec.loc_port;
+ ip_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip_entry->psrc = spec.rem_port;
+ ip_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if (!(spec.match_flags &
+ ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_OUTER_VID))) {
rule->flow_type = ETHER_FLOW;
- memset(mac_mask->h_dest, ~0, ETH_ALEN);
- if (vid != EFX_FILTER_VID_UNSPEC) {
- rule->flow_type |= FLOW_EXT;
- rule->h_ext.vlan_tci = htons(vid);
- rule->m_ext.vlan_tci = htons(0xfff);
+ if (spec.match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+ memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+ memset(mac_mask->h_dest, ~0, ETH_ALEN);
+ else
+ memcpy(mac_mask->h_dest, mac_addr_ig_mask,
+ ETH_ALEN);
}
- return 0;
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+ memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
+ memset(mac_mask->h_source, ~0, ETH_ALEN);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ mac_entry->h_proto = spec.ether_type;
+ mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ } else {
+ /* The above should handle all filters that we insert */
+ WARN_ON(1);
+ return -EINVAL;
}
- rc = efx_filter_get_ipv4_local(&spec, &proto,
- &ip_entry->ip4dst, &ip_entry->pdst);
- if (rc != 0) {
- rc = efx_filter_get_ipv4_full(
- &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
- &ip_entry->ip4src, &ip_entry->psrc);
- EFX_WARN_ON_PARANOID(rc);
- ip_mask->ip4src = IP4_ADDR_FULL_MASK;
- ip_mask->psrc = PORT_FULL_MASK;
+ if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = spec.outer_vid;
+ rule->m_ext.vlan_tci = htons(0xfff);
}
- rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
- ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
- ip_mask->pdst = PORT_FULL_MASK;
+
return rc;
}
@@ -982,82 +905,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
- 0xfff : rule->ring_cookie);
+ EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
- switch (rule->flow_type) {
+ switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- case UDP_V4_FLOW: {
- u8 proto = (rule->flow_type == TCP_V4_FLOW ?
- IPPROTO_TCP : IPPROTO_UDP);
-
- /* Must match all of destination, */
- if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
- ip_mask->pdst == PORT_FULL_MASK))
- return -EINVAL;
- /* all or none of source, */
- if ((ip_mask->ip4src || ip_mask->psrc) &&
- !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
- ip_mask->psrc == PORT_FULL_MASK))
- return -EINVAL;
- /* and nothing else */
- if (ip_mask->tos || rule->m_ext.vlan_tci)
+ case UDP_V4_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IP);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (ip_mask->ip4dst) {
+ if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = ip_entry->ip4dst;
+ }
+ if (ip_mask->ip4src) {
+ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = ip_entry->ip4src;
+ }
+ if (ip_mask->pdst) {
+ if (ip_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip_entry->pdst;
+ }
+ if (ip_mask->psrc) {
+ if (ip_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip_entry->psrc;
+ }
+ if (ip_mask->tos)
return -EINVAL;
-
- if (ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst,
- ip_entry->ip4src,
- ip_entry->psrc);
- else
- rc = efx_filter_set_ipv4_local(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst);
- if (rc)
- return rc;
break;
- }
-
- case ETHER_FLOW | FLOW_EXT:
- case ETHER_FLOW: {
- u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
- ntohs(rule->m_ext.vlan_tci) : 0);
-
- /* Must not match on source address or Ethertype */
- if (!is_zero_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto)
- return -EINVAL;
- /* Is it a default UC or MC filter? */
- if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
- vlan_tag_mask == 0) {
- if (is_multicast_ether_addr(mac_entry->h_dest))
- rc = efx_filter_set_mc_def(&spec);
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest)) {
+ if (ether_addr_equal(mac_mask->h_dest,
+ mac_addr_ig_mask))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ else if (is_broadcast_ether_addr(mac_mask->h_dest))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
else
- rc = efx_filter_set_uc_def(&spec);
+ return -EINVAL;
+ memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
}
- /* Otherwise, it must match all of destination and all
- * or none of VID.
- */
- else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
- (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
- rc = efx_filter_set_eth_local(
- &spec,
- vlan_tag_mask ?
- ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
- mac_entry->h_dest);
- } else {
- rc = -EINVAL;
+ if (!is_zero_ether_addr(mac_mask->h_source)) {
+ if (!is_broadcast_ether_addr(mac_mask->h_source))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+ }
+ if (mac_mask->h_proto) {
+ if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = mac_entry->h_proto;
}
- if (rc)
- return rc;
break;
- }
default:
return -EINVAL;
}
+ if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+ if (rule->m_ext.vlan_tci != htons(0xfff))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec.outer_vid = rule->h_ext.vlan_tci;
+ }
+
rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 71998e7995d9..ff5d322b9b49 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,17 +19,284 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
-#include "spi.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
+#include "mdio_10g.h"
/* Hardware control for SFC4000 (aka Falcon). */
+/**************************************************************************
+ *
+ * NIC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
+ (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
+
+#define FALCON_DMA_STAT(ext_name, hw_name) \
+ [FALCON_STAT_ ## ext_name] = \
+ { #ext_name, \
+ /* 48-bit stats are zero-padded to 64 on DMA */ \
+ hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
+ hw_name ## _ ## offset }
+#define FALCON_OTHER_STAT(ext_name) \
+ [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
+ FALCON_DMA_STAT(tx_bytes, XgTxOctets),
+ FALCON_DMA_STAT(tx_packets, XgTxPkts),
+ FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
+ FALCON_DMA_STAT(tx_control, XgTxControlPkts),
+ FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
+ FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
+ FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
+ FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
+ FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
+ FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
+ FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
+ FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
+ FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
+ FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
+ FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
+ FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
+ FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
+ FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
+ FALCON_DMA_STAT(rx_bytes, XgRxOctets),
+ FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
+ FALCON_OTHER_STAT(rx_bad_bytes),
+ FALCON_DMA_STAT(rx_packets, XgRxPkts),
+ FALCON_DMA_STAT(rx_good, XgRxPktsOK),
+ FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
+ FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
+ FALCON_DMA_STAT(rx_control, XgRxControlPkts),
+ FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
+ FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
+ FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
+ FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
+ FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
+ FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
+ FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
+ FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
+ FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
+ FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
+ FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
+ FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
+ FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
+ FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
+ FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
+ FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
+ FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
+ FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
+ FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+};
+static const unsigned long falcon_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
+};
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+#define SPI_WRSR 0x01 /* Write status register */
+#define SPI_WRITE 0x02 /* Write data to memory array */
+#define SPI_READ 0x03 /* Read data from memory array */
+#define SPI_WRDI 0x04 /* Reset write enable latch */
+#define SPI_RDSR 0x05 /* Read status register */
+#define SPI_WREN 0x06 /* Set write enable latch */
+#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
+
+#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
+
+/**************************************************************************
+ *
+ * Non-volatile memory layout
+ *
+ **************************************************************************
+ */
+
+/* SFC4000 flash is partitioned into:
+ * 0-0x400 chip and board config (see struct falcon_nvconfig)
+ * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
+ * 0x8000-end boot code (mapped to PCI expansion ROM)
+ * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
+ * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
+ * 0-0x400 chip and board config
+ * configurable VPD
+ * 0x800-0x1800 boot config
+ * Aside from the chip and board config, all of these are optional and may
+ * be absent or truncated depending on the devices used.
+ */
+#define FALCON_NVCONFIG_END 0x400U
+#define FALCON_FLASH_BOOTCODE_START 0x8000U
+#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
+#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+ __le16 nports;
+ u8 port0_phy_addr;
+ u8 port0_phy_type;
+ u8 port1_phy_addr;
+ u8 port1_phy_type;
+ __le16 asic_sub_revision;
+ __le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+ __le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
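+
+/* The SIZE, ERASE_SIZE and BLOCK_SIZE fields hold log2 values, so a
+ * consumer decodes them along the lines of:
+ *
+ *	erase_size = 1 << SPI_DEV_TYPE_FIELD(device_type,
+ *					     SPI_DEV_TYPE_ERASE_SIZE);
+ *
+ * where a field value of 12 denotes 4096-byte erase sectors.
+ */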
+
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+ efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
+ u8 mac_address[2][8]; /* 0x310 */
+ efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
+ efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
+ efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
+ efx_oword_t hw_init_reg; /* 0x350 */
+ efx_oword_t nic_stat_reg; /* 0x360 */
+ efx_oword_t glb_ctl_reg; /* 0x370 */
+ efx_oword_t srm_cfg_reg; /* 0x380 */
+ efx_oword_t spare_reg; /* 0x390 */
+ __le16 board_magic_num; /* 0x3A0 */
+ __le16 board_struct_ver;
+ __le16 board_checksum;
+ struct falcon_nvconfig_board_v2 board_v2;
+ efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
+} __packed;
+
+/*************************************************************************/
+
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -146,7 +413,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
*
* NB most hardware supports MSI interrupts
*/
-inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -156,7 +423,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
}
-irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
@@ -177,10 +444,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
/* Check to see if we have a serious error condition */
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ return efx_farch_fatal_interrupt(efx);
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
@@ -241,9 +511,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
}
}
-int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
- unsigned int command, int address,
- const void *in, void *out, size_t len)
+static int
+falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ unsigned int command, int address,
+ const void *in, void *out, size_t len)
{
bool addressed = (address >= 0);
bool reading = (out != NULL);
@@ -297,48 +568,16 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
return 0;
}
-static size_t
-falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
-{
- return min(FALCON_SPI_MAX_LEN,
- (spi->block_size - (start & (spi->block_size - 1))));
-}
-
static inline u8
-efx_spi_munge_command(const struct efx_spi_device *spi,
- const u8 command, const unsigned int address)
+falcon_spi_munge_command(const struct falcon_spi_device *spi,
+ const u8 command, const unsigned int address)
{
return command | (((address >> 8) & spi->munge_address) << 3);
}
-/* Wait up to 10 ms for buffered write completion */
-int
-falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
- u8 status;
- int rc;
-
- for (;;) {
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (time_after_eq(jiffies, timeout)) {
- netif_err(efx, hw, efx->net_dev,
- "SPI write timeout on device %d"
- " last status=0x%02x\n",
- spi->device_id, status);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- }
-}
-
-int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
- loff_t start, size_t len, size_t *retlen, u8 *buffer)
+static int
+falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
size_t block_len, pos = 0;
unsigned int command;
@@ -347,7 +586,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
while (pos < len) {
block_len = min(len - pos, FALCON_SPI_MAX_LEN);
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
buffer + pos, block_len);
if (rc)
@@ -367,8 +606,52 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
-int
-falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+#ifdef CONFIG_SFC_MTD
+
+struct falcon_mtd_partition {
+ struct efx_mtd_partition common;
+ const struct falcon_spi_device *spi;
+ size_t offset;
+};
+
+#define to_falcon_mtd_partition(mtd) \
+ container_of(mtd, struct falcon_mtd_partition, common.mtd)
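+
+/* container_of() here recovers the enclosing falcon_mtd_partition
+ * from the struct mtd_info that the MTD core passes to the callbacks
+ * below, giving them access to the SPI device and partition offset.
+ */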
+
+static size_t
+falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
+{
+ return min(FALCON_SPI_MAX_LEN,
+ (spi->block_size - (start & (spi->block_size - 1))));
+}
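+
+/* A worked example: with a 256-byte write page and start == 0x1f0,
+ * only 16 bytes remain in the current page, so the write is clamped
+ * to 16 (and in any case never exceeds FALCON_SPI_MAX_LEN).
+ */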
+
+/* Wait up to 10 ms for buffered write completion */
+static int
+falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
+ u8 status;
+ int rc;
+
+ for (;;) {
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
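+
+/* A worked example of the timeout above, assuming HZ == 250: the
+ * deadline is 1 + DIV_ROUND_UP(250, 100) = 4 ticks, i.e. 12 ms plus
+ * one tick of margin for the partially-elapsed current tick.
+ */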
+
+static int
+falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -383,7 +666,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
block_len = min(len - pos,
falcon_spi_write_limit(spi, start + pos));
- command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
buffer + pos, NULL, block_len);
if (rc)
@@ -393,7 +676,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
if (rc)
break;
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
NULL, verify_buffer, block_len);
if (memcmp(verify_buffer, buffer + pos, block_len)) {
@@ -416,6 +699,520 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
+static int
+falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ u8 status;
+ int rc, i;
+
+ /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
+ for (i = 0; i < 40; i++) {
+ __set_current_state(uninterruptible ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ pr_err("%s: timed out waiting for %s\n",
+ part->common.name, part->common.dev_type_name);
+ return -ETIMEDOUT;
+}
+
+static int
+falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
+ SPI_STATUS_BP0);
+ u8 status;
+ int rc;
+
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+
+ if (!(status & unlock_mask))
+ return 0; /* already unlocked */
+
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+
+ status &= ~unlock_mask;
+ rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+ NULL, sizeof(status));
+ if (rc)
+ return rc;
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+#define FALCON_SPI_VERIFY_BUF_LEN 16
+
+static int
+falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ unsigned pos, block_len;
+ u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
+ u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
+ int rc;
+
+ if (len != spi->erase_size)
+ return -EINVAL;
+
+ if (spi->erase_command == 0)
+ return -EOPNOTSUPP;
+
+ rc = falcon_spi_unlock(efx, spi);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+ NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_slow_wait(part, false);
+
+ /* Verify the entire region has been wiped */
+ memset(empty, 0xff, sizeof(empty));
+ for (pos = 0; pos < len; pos += block_len) {
+ block_len = min(len - pos, sizeof(buffer));
+ rc = falcon_spi_read(efx, spi, start + pos, block_len,
+ NULL, buffer);
+ if (rc)
+ return rc;
+ if (memcmp(empty, buffer, block_len))
+ return -EIO;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return rc;
+}
+
+static void falcon_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s",
+ efx->name, part->type_name);
+}
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_read(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_erase(part, part->offset + start, len);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_write(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_sync(struct mtd_info *mtd)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = falcon_spi_slow_wait(part, true);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_mtd_partition *parts;
+ struct falcon_spi_device *spi;
+ size_t n_parts;
+ int rc = -ENODEV;
+
+ ASSERT_RTNL();
+
+	/* Allocate space for the maximum number of partitions */
+ parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+ n_parts = 0;
+
+ spi = &nic_data->spi_flash;
+ if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.dev_type_name = "flash";
+ parts[n_parts].common.type_name = "sfc_flash_bootrom";
+ parts[n_parts].common.mtd.type = MTD_NORFLASH;
+ parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
+ parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ spi = &nic_data->spi_eeprom;
+ if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.dev_type_name = "EEPROM";
+ parts[n_parts].common.type_name = "sfc_bootconfig";
+ parts[n_parts].common.mtd.type = MTD_RAM;
+ parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
+ parts[n_parts].common.mtd.size =
+ min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
+ FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * XMAC operations
+ *
+ **************************************************************************
+ */
+
+/* Configure the XAUI driver that is an output from Falcon */
+static void falcon_setup_xaui(struct efx_nic *efx)
+{
+ efx_oword_t sdctl, txdrv;
+
+ /* Move the XAUI into low power, unless there is no PHY, in
+ * which case the XAUI will have to drive a cable. */
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return;
+
+ efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+ EFX_POPULATE_OWORD_8(txdrv,
+ FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+ efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int count;
+
+ /* Don't fetch MAC statistics over an XMAC reset */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ /* Start reset sequence */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+
+ /* Wait up to 10 ms for completion, then reinitialise */
+ for (count = 0; count < 1000; count++) {
+ efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+ EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+ falcon_setup_xaui(efx);
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_ack_status_intr(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+ return;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up)
+ return;
+
+ /* We can only use this interrupt to signal the negative edge of
+ * xaui_align [we have to poll the positive edge]. */
+ if (nic_data->xmac_poll_required)
+ return;
+
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+}
+
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool align_done, link_ok = false;
+ int sync_status;
+
+ /* Read link status */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+ sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+ if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
+ link_ok = true;
+
+ /* Clear link status ready for next read */
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ return link_ok;
+}
+
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+ unsigned int max_frame_len;
+ efx_oword_t reg;
+ bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+ bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+
+ /* Configure MAC - cut-thru mode is hard wired on */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AB_XM_RX_JUMBO_MODE, 1,
+ FRF_AB_XM_TX_STAT_EN, 1,
+ FRF_AB_XM_RX_STAT_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ /* Configure TX */
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AB_XM_TXEN, 1,
+ FRF_AB_XM_TX_PRMBL, 1,
+ FRF_AB_XM_AUTO_PAD, 1,
+ FRF_AB_XM_TXCRC, 1,
+ FRF_AB_XM_FCNTL, tx_fc,
+ FRF_AB_XM_IPG, 0x3);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+
+ /* Configure RX */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_XM_RXEN, 1,
+ FRF_AB_XM_AUTO_DEPAD, 0,
+ FRF_AB_XM_ACPT_ALL_MCAST, 1,
+ FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
+ FRF_AB_XM_PASS_CRC_ERR, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+
+ /* Set frame length */
+ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+ FRF_AB_XM_TX_JUMBO_MODE, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+ FRF_AB_XM_DIS_FCNTL, !rx_fc);
+ efx_writeo(efx, &reg, FR_AB_XM_FC);
+
+ /* Set MAC address */
+ memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+ memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+}
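+
+/* Sketch of the address packing above: for a hypothetical dev_addr of
+ * 00:0f:53:01:02:03, XM_ADR_LO receives octets 00:0f:53:01 and
+ * XM_ADR_HI the remaining 02:03.
+ */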
+
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+ bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+ bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+ bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+
+	/* XGXS block is flaky and will need to be reset if moving
+	 * into or out of XGMII, XGXS or XAUI loopbacks. */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ if ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback))
+ falcon_reset_xaui(efx);
+
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+}
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+{
+ bool mac_up = falcon_xmac_link_ok(efx);
+
+ if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
+ efx_phy_mode_disabled(efx->phy_mode))
+ /* XAUI link is expected to be down */
+ return mac_up;
+
+ falcon_stop_nic_stats(efx);
+
+ while (!mac_up && tries) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
+ falcon_reset_xaui(efx);
+ udelay(200);
+
+ mac_up = falcon_xmac_link_ok(efx);
+ --tries;
+ }
+
+ falcon_start_nic_stats(efx);
+
+ return mac_up;
+}
+
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+ return !falcon_xmac_link_ok_retry(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ efx_farch_filter_sync_rx_mode(efx);
+
+ falcon_reconfigure_xgxs_core(efx);
+ falcon_reconfigure_xmac_core(efx);
+
+ falcon_reconfigure_mac_wrapper(efx);
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ falcon_ack_status_intr(efx);
+
+ return 0;
+}
+
+static void falcon_poll_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up || !nic_data->xmac_poll_required)
+ return;
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ falcon_ack_status_intr(efx);
+}
+
/**************************************************************************
*
* MAC wrapper
@@ -497,7 +1294,7 @@ static void falcon_reset_macs(struct efx_nic *efx)
falcon_setup_xaui(efx);
}
-void falcon_drain_tx_fifo(struct efx_nic *efx)
+static void falcon_drain_tx_fifo(struct efx_nic *efx)
{
efx_oword_t reg;
@@ -529,7 +1326,7 @@ static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
falcon_drain_tx_fifo(efx);
}
-void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
efx_oword_t reg;
@@ -550,7 +1347,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_POPULATE_OWORD_5(reg,
FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
FRF_AB_MAC_BCAD_ACPT, 1,
- FRF_AB_MAC_UC_PROM, efx->promiscuous,
+ FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
FRF_AB_MAC_LINK_STATUS, 1, /* always set */
FRF_AB_MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get
@@ -583,10 +1380,7 @@ static void falcon_stats_request(struct efx_nic *efx)
WARN_ON(nic_data->stats_pending);
WARN_ON(nic_data->stats_disable_count);
- if (nic_data->stats_dma_done == NULL)
- return; /* no mac selected */
-
- *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+ FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
nic_data->stats_pending = true;
wmb(); /* ensure done flag is clear */
@@ -608,9 +1402,11 @@ static void falcon_stats_complete(struct efx_nic *efx)
return;
nic_data->stats_pending = false;
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, nic_data->stats,
+ efx->stats_buffer.addr, true);
} else {
netif_err(efx, hw, efx->net_dev,
"timed out waiting for statistics\n");
@@ -678,6 +1474,28 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
return 0;
}
+/* TX flow control may automatically turn itself off if the link
+ * partner (intermittently) stops responding to pause frames. There
+ * isn't any indication that this has happened, so the best we can do
+ * is leave it up to the user to spot this and fix it by cycling
+ * transmit flow control on this end.
+ */
+
+static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Schedule a reset to recover */
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+}
+
+static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Recover by resetting the EM block */
+ falcon_stop_nic_stats(efx);
+ falcon_drain_tx_fifo(efx);
+ falcon_reconfigure_xmac(efx);
+ falcon_start_nic_stats(efx);
+}
+
/**************************************************************************
*
* PHY access via GMII
@@ -861,7 +1679,7 @@ static int falcon_probe_port(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- FALCON_MAC_STATS_SIZE);
+ FALCON_MAC_STATS_SIZE, GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
@@ -869,7 +1687,6 @@ static int falcon_probe_port(struct efx_nic *efx)
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
return 0;
}
@@ -926,15 +1743,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
- struct efx_spi_device *spi;
+ struct falcon_spi_device *spi;
void *region;
int rc, magic_num, struct_ver;
__le16 *word, *limit;
u32 csum;
- if (efx_spi_present(&nic_data->spi_flash))
+ if (falcon_spi_present(&nic_data->spi_flash))
spi = &nic_data->spi_flash;
- else if (efx_spi_present(&nic_data->spi_eeprom))
+ else if (falcon_spi_present(&nic_data->spi_eeprom))
spi = &nic_data->spi_eeprom;
else
return -EINVAL;
@@ -949,7 +1766,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
mutex_unlock(&nic_data->spi_lock);
if (rc) {
netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
- efx_spi_present(&nic_data->spi_flash) ?
+ falcon_spi_present(&nic_data->spi_flash) ?
"flash" : "EEPROM");
rc = -EIO;
goto out;
@@ -998,7 +1815,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
return falcon_read_nvram(efx, NULL);
}
-static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
@@ -1058,8 +1875,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
efx_reset_down(efx, reset_method);
tests->registers =
- efx_nic_test_registers(efx, falcon_b0_register_tests,
- ARRAY_SIZE(falcon_b0_register_tests))
+ efx_farch_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
? -1 : 1;
rc = falcon_reset_hw(efx, reset_method);
@@ -1078,8 +1895,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
switch (reason) {
case RESET_TYPE_RX_RECOVERY:
- case RESET_TYPE_RX_DESC_FETCH:
- case RESET_TYPE_TX_DESC_FETCH:
+ case RESET_TYPE_DMA_ERROR:
case RESET_TYPE_TX_SKIP:
/* These can occasionally occur due to hardware bugs.
* We try to reset without disrupting the link.
@@ -1294,7 +2110,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
}
static void falcon_spi_device_init(struct efx_nic *efx,
- struct efx_spi_device *spi_device,
+ struct falcon_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
if (device_type != 0) {
@@ -1360,10 +2176,11 @@ out:
return rc;
}
-static void falcon_dimension_resources(struct efx_nic *efx)
+static int falcon_dimension_resources(struct efx_nic *efx)
{
efx->rx_dc_base = 0x20000;
efx->tx_dc_base = 0x26000;
+ return 0;
}
/* Probe all SPI devices on the NIC */
@@ -1410,6 +2227,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
large_eeprom_type);
}
+static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+{
+ return 0x20000;
+}
+
+static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+{
+ /* Map everything up to and including the RSS indirection table.
+ * The PCI core takes care of mapping the MSI-X tables.
+ */
+ return FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
+}
+
static int falcon_probe_nic(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data;
@@ -1424,7 +2255,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon FPGA not supported\n");
goto fail1;
@@ -1478,7 +2309,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -1499,6 +2331,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
+ EFX_MAX_CHANNELS);
efx->timer_quantum_ns = 4968; /* 621 cycles */
/* Initialise I2C adapter */
@@ -1657,7 +2491,7 @@ static int falcon_init_nic(struct efx_nic *efx)
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -1688,24 +2522,65 @@ static void falcon_remove_nic(struct efx_nic *efx)
efx->nic_data = NULL;
}
-static void falcon_update_nic_stats(struct efx_nic *efx)
+static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, names);
+}
+
+static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
struct falcon_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
efx_oword_t cnt;
- if (nic_data->stats_disable_count)
- return;
+ if (!nic_data->stats_disable_count) {
+ efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+ stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
+ EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+ if (nic_data->stats_pending &&
+ FALCON_XMAC_STATS_DMA_FLAG(efx)) {
+ nic_data->stats_pending = false;
+ rmb(); /* read the done flag before the stats */
+ efx_nic_update_stats(
+ falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask,
+ stats, efx->stats_buffer.addr, true);
+ }
- efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
- efx->n_rx_nodesc_drop_cnt +=
- EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+ /* Update derived statistic */
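+	/* Pause (control) frames are counted in rx_bytes but not in
+	 * rx_good_bytes, and each is a minimum-size 64-octet frame,
+	 * hence the rx_control * 64 correction below.
+	 */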
+ efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
+ stats[FALCON_STAT_rx_bytes] -
+ stats[FALCON_STAT_rx_good_bytes] -
+ stats[FALCON_STAT_rx_control] * 64);
+ }
- if (nic_data->stats_pending &&
- *nic_data->stats_dma_done == FALCON_STATS_DONE) {
- nic_data->stats_pending = false;
- rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
+ core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
+ core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[FALCON_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[FALCON_STAT_rx_gtjumbo] +
+ stats[FALCON_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[FALCON_STAT_rx_symbol_error]);
}
+
+ return FALCON_STAT_COUNT;
}
void falcon_start_nic_stats(struct efx_nic *efx)
@@ -1734,7 +2609,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
/* Wait enough time for the most recent transfer to
* complete. */
for (i = 0; i < 4 && nic_data->stats_pending; i++) {
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx))
break;
msleep(1);
}
@@ -1778,11 +2653,12 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
.dimension_resources = falcon_dimension_resources,
- .fini = efx_port_dummy_op_void,
+ .fini = falcon_irq_ack_a1,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
.map_reset_flags = falcon_map_reset_flags,
@@ -1790,23 +2666,71 @@ const struct efx_nic_type falcon_a1_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = falcon_legacy_interrupt_a1,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+
+ /* We don't expose the filter table on Falcon A1 as it is not
+ * mapped into function 0, but these implementations still
+ * work with a degenerate case of all tables set to size 0.
+ */
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_A1,
- .mem_map_size = 0x20000,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -1816,12 +2740,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
.rx_buffer_padding = 0x24,
.can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
- .phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
+ .mcdi_max_ver = -1,
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
@@ -1834,14 +2759,17 @@ const struct efx_nic_type falcon_b0_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
@@ -1849,28 +2777,67 @@ const struct efx_nic_type falcon_b0_nic_type = {
.resume_wol = efx_port_dummy_op_void,
.test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_B0,
- /* Map everything up to and including the RSS indirection
- * table. Don't map MSI-X table, MSI-X PBA since Linux
- * requires that they not be mapped. */
- .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP *
- FR_BZ_RX_INDIRECTION_TBL_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+ .mcdi_max_ver = -1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index ec1e99d0dcad..1736f4b806af 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
deleted file mode 100644
index 8333865d4c95..000000000000
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/delay.h>
-#include "net_driver.h"
-#include "efx.h"
-#include "nic.h"
-#include "regs.h"
-#include "io.h"
-#include "mdio_10g.h"
-#include "workarounds.h"
-
-/**************************************************************************
- *
- * MAC operations
- *
- *************************************************************************/
-
-/* Configure the XAUI driver that is an output from Falcon */
-void falcon_setup_xaui(struct efx_nic *efx)
-{
- efx_oword_t sdctl, txdrv;
-
- /* Move the XAUI into low power, unless there is no PHY, in
- * which case the XAUI will have to drive a cable. */
- if (efx->phy_type == PHY_TYPE_NONE)
- return;
-
- efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
-
- EFX_POPULATE_OWORD_8(txdrv,
- FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
- efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
-}
-
-int falcon_reset_xaui(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
- int count;
-
- /* Don't fetch MAC statistics over an XMAC reset */
- WARN_ON(nic_data->stats_disable_count == 0);
-
- /* Start reset sequence */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
-
- /* Wait up to 10 ms for completion, then reinitialise */
- for (count = 0; count < 1000; count++) {
- efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
- if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
- EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
- falcon_setup_xaui(efx);
- return 0;
- }
- udelay(10);
- }
- netif_err(efx, hw, efx->net_dev,
- "timed out waiting for XAUI/XGXS reset\n");
- return -ETIMEDOUT;
-}
-
-static void falcon_ack_status_intr(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
-
- if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
- return;
-
- /* We expect xgmii faults if the wireside link is down */
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
- return;
-
- /* We can only use this interrupt to signal the negative edge of
- * xaui_align [we have to poll the positive edge]. */
- if (nic_data->xmac_poll_required)
- return;
-
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-}
-
-static bool falcon_xgxs_link_ok(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool align_done, link_ok = false;
- int sync_status;
-
- /* Read link status */
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
-
- align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
- sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
- if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
- link_ok = true;
-
- /* Clear link status ready for next read */
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- return link_ok;
-}
-
-static bool falcon_xmac_link_ok(struct efx_nic *efx)
-{
- /*
- * Check MAC's XGXS link status except when using XGMII loopback
- * which bypasses the XGXS block.
- * If possible, check PHY's XGXS link status except when using
- * MAC loopback.
- */
- return (efx->loopback_mode == LOOPBACK_XGMII ||
- falcon_xgxs_link_ok(efx)) &&
- (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
- LOOPBACK_INTERNAL(efx) ||
- efx_mdio_phyxgxs_lane_sync(efx));
-}
-
-static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
-{
- unsigned int max_frame_len;
- efx_oword_t reg;
- bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
- bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
-
- /* Configure MAC - cut-thru mode is hard wired on */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AB_XM_RX_JUMBO_MODE, 1,
- FRF_AB_XM_TX_STAT_EN, 1,
- FRF_AB_XM_RX_STAT_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
-
- /* Configure TX */
- EFX_POPULATE_OWORD_6(reg,
- FRF_AB_XM_TXEN, 1,
- FRF_AB_XM_TX_PRMBL, 1,
- FRF_AB_XM_AUTO_PAD, 1,
- FRF_AB_XM_TXCRC, 1,
- FRF_AB_XM_FCNTL, tx_fc,
- FRF_AB_XM_IPG, 0x3);
- efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
-
- /* Configure RX */
- EFX_POPULATE_OWORD_5(reg,
- FRF_AB_XM_RXEN, 1,
- FRF_AB_XM_AUTO_DEPAD, 0,
- FRF_AB_XM_ACPT_ALL_MCAST, 1,
- FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
- FRF_AB_XM_PASS_CRC_ERR, 1);
- efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
-
- /* Set frame length */
- max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
- efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
- FRF_AB_XM_TX_JUMBO_MODE, 1);
- efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
- FRF_AB_XM_DIS_FCNTL, !rx_fc);
- efx_writeo(efx, &reg, FR_AB_XM_FC);
-
- /* Set MAC address */
- memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
- memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
-}
-
-static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
- bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
- bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
-
- /* XGXS block is flaky and will need to be reset if moving
- * into our out of XGMII, XGXS or XAUI loopbacks. */
- if (EFX_WORKAROUND_5147(efx)) {
- bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
- bool reset_xgxs;
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
- old_xgmii_loopback =
- EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
-
- /* The PHY driver may have turned XAUI off */
- reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
- (xaui_loopback != old_xaui_loopback) ||
- (xgmii_loopback != old_xgmii_loopback));
-
- if (reset_xgxs)
- falcon_reset_xaui(efx);
- }
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
- (xgxs_loopback || xaui_loopback) ?
- FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
-}
-
-
-/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
-{
- bool mac_up = falcon_xmac_link_ok(efx);
-
- if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
- efx_phy_mode_disabled(efx->phy_mode))
- /* XAUI link is expected to be down */
- return mac_up;
-
- falcon_stop_nic_stats(efx);
-
- while (!mac_up && tries) {
- netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
- falcon_reset_xaui(efx);
- udelay(200);
-
- mac_up = falcon_xmac_link_ok(efx);
- --tries;
- }
-
- falcon_start_nic_stats(efx);
-
- return mac_up;
-}
-
-bool falcon_xmac_check_fault(struct efx_nic *efx)
-{
- return !falcon_xmac_link_ok_retry(efx, 5);
-}
-
-int falcon_reconfigure_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- falcon_reconfigure_xgxs_core(efx);
- falcon_reconfigure_xmac_core(efx);
-
- falcon_reconfigure_mac_wrapper(efx);
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_ack_status_intr(efx);
-
- return 0;
-}
-
-void falcon_update_stats_xmac(struct efx_nic *efx)
-{
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
-
- /* Update MAC stats from DMAed values */
- FALCON_STAT(efx, XgRxOctets, rx_bytes);
- FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
- FALCON_STAT(efx, XgRxPkts, rx_packets);
- FALCON_STAT(efx, XgRxPktsOK, rx_good);
- FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
- FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
- FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
- FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
- FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
- FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
- FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
- FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
- FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
- FALCON_STAT(efx, XgRxAlignError, rx_align_error);
- FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
- FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
- FALCON_STAT(efx, XgRxControlPkts, rx_control);
- FALCON_STAT(efx, XgRxPausePkts, rx_pause);
- FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
- FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
- FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
- FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
- FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
- FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
- FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
- FALCON_STAT(efx, XgRxLengthError, rx_length_error);
- FALCON_STAT(efx, XgTxPkts, tx_packets);
- FALCON_STAT(efx, XgTxOctets, tx_bytes);
- FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
- FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
- FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
- FALCON_STAT(efx, XgTxControlPkts, tx_control);
- FALCON_STAT(efx, XgTxPausePkts, tx_pause);
- FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
- FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
- FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
- FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
- FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
- FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
- FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
- FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
- FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
- FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
- FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
- FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
-
- /* Update derived statistics */
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
- mac_stats->tx_control * 64);
- efx_update_diff_stat(&mac_stats->rx_bad_bytes,
- mac_stats->rx_bytes - mac_stats->rx_good_bytes -
- mac_stats->rx_control * 64);
-}
-
-void falcon_poll_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
- !nic_data->xmac_poll_required)
- return;
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_ack_status_intr(efx);
-}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 000000000000..c0907d884d75
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,2942 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Falcon-architecture (SFC4000 and SFC9000-family) support */
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
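+
+/* Note: the descriptor cache size registers are programmed with the
+ * order rather than the entry count, where entries = 8 << order (checked
+ * by BUILD_BUG_ON() in efx_farch_init_common()): 8 << 1 = 16 TX entries,
+ * 8 << 3 = 64 RX entries.
+ */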
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Driver generated events */
+#define _EFX_CHANNEL_MAGIC_TEST 0x000101
+#define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
+#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
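+
+/* Worked example: a TEST event for channel 3 encodes as
+ * _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, 3) = 0x000101 << 8 | 3
+ * = 0x00010103, and _EFX_CHANNEL_MAGIC_CODE(0x00010103) = 0x000101
+ * recovers the code for dispatch in efx_farch_handle_generated_event().
+ */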
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
+
+/**************************************************************************
+ *
+ * Hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+			/* Test that this bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+			/* Test that this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_qword_t buf_desc;
+ unsigned int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EFX_BUG_ON_PARANOID(!buffer->buf.addr);
+
+ /* Write buffer descriptors to NIC */
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
+ EFX_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EFX_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ return -ENOMEM;
+ buffer->entries = len / EFX_BUF_SIZE;
+ BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+#ifdef CONFIG_SFC_SRIOV
+ BUG_ON(efx_sriov_enabled(efx) &&
+ efx->vf_buftbl_base < efx->next_buffer_table);
+#endif
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ return 0;
+}
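+
+/* Example: a 512-entry descriptor ring needs 512 * sizeof(efx_qword_t)
+ * = 4096 bytes, i.e. exactly one 4KB (EFX_BUF_SIZE) buffer table entry;
+ * a 1024-entry ring would take two.
+ */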
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->buf.addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, buffer->buf.len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ efx_nic_free_buffer(efx, &buffer->buf);
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
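+
+/* Pushing writes the first descriptor together with the write pointer in
+ * one register write, letting the NIC start on it without first fetching
+ * the descriptor from host memory (a latency optimisation; the push
+ * criteria are applied by efx_farch_tx_write() below).
+ */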
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
+
+ /* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_farch_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_farch_notify_tx_desc(tx_queue);
+ }
+}
+
+/* Allocate hardware resources for a TX queue */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
+
+ efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ __clear_bit_le(tx_queue->queue, &reg);
+ else
+ __set_bit_le(tx_queue->queue, &reg);
+ efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
+ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+ atomic_set(&tx_queue->flush_outstanding, 1);
+
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_desc_ptr;
+
+ /* Remove TX descriptor ring from card */
+ EFX_ZERO_OWORD(tx_desc_ptr);
+ efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ efx_farch_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb();
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
+}
+
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+ bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->scatter_n = 0;
+
+ /* Pin RX descriptor ring */
+ efx_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ efx_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_oword_t rx_flush_descq;
+
+ EFX_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
+ efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Remove RX descriptor ring from card */
+ EFX_ZERO_OWORD(rx_desc_ptr);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* efx_farch_do_flush() must be woken up when all flushes are completed,
+ * or more RX flushes can be kicked off.
+ */
+static bool efx_farch_flush_wake(struct efx_nic *efx)
+{
+	/* Ensure that all updates are visible to efx_farch_do_flush() */
+ smp_mb();
+
+ return (atomic_read(&efx->active_queues) == 0 ||
+ (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+}
+
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+	bool complete = true;
+ efx_oword_t txd_ptr_tbl;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_reado_table(efx, &txd_ptr_tbl,
+ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+ if (EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_FLUSH) ||
+ EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush did not complete on TXQ %d\n",
+ tx_queue->queue);
+				complete = false;
+ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+ 1, 0)) {
+ /* The flush is complete, but we didn't
+ * receive a flush completion event
+ */
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush complete on TXQ %d, so drain "
+ "the queue\n", tx_queue->queue);
+ /* Don't need to increment active_queues as it
+ * has already been incremented for the queues
+ * which did not drain
+ */
+ efx_farch_magic_event(channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(
+ tx_queue));
+ }
+ }
+ }
+
+	return complete;
+}
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+static int efx_farch_do_flush(struct efx_nic *efx)
+{
+ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_farch_flush_tx_queue(tx_queue);
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ }
+ }
+
+ while (timeout && atomic_read(&efx->active_queues) > 0) {
+ /* If SRIOV is enabled, then offload receive queue flushing to
+ * the firmware (though we will still have to poll for
+ * completion). If that fails, fall back to the old scheme.
+ */
+ if (efx_sriov_enabled(efx)) {
+ rc = efx_mcdi_flush_rxqs(efx);
+ if (!rc)
+ goto wait;
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of
+ * which may need to be retried if there is an outstanding
+ * descriptor fetch
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (atomic_read(&efx->rxq_flush_outstanding) >=
+ EFX_RX_FLUSH_COUNT)
+ break;
+
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ atomic_inc(&efx->rxq_flush_outstanding);
+ efx_farch_flush_rx_queue(rx_queue);
+ }
+ }
+ }
+
+ wait:
+ timeout = wait_event_timeout(efx->flush_wq,
+ efx_farch_flush_wake(efx),
+ timeout);
+ }
+
+ if (atomic_read(&efx->active_queues) &&
+ !efx_check_tx_flush_complete(efx)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->active_queues),
+ atomic_read(&efx->rxq_flush_outstanding),
+ atomic_read(&efx->rxq_flush_pending));
+ rc = -ETIMEDOUT;
+
+ atomic_set(&efx->active_queues, 0);
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+ return rc;
+}
+
+int efx_farch_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc = 0;
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ /* Only perform flush if DMA is enabled */
+ if (efx->pci_dev->is_busmaster) {
+ efx->type->prepare_flush(efx);
+ rc = efx_farch_do_flush(efx);
+ efx->type->finish_flush(efx);
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_farch_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_farch_tx_fini(tx_queue);
+ }
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void efx_farch_ev_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+
+ /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+ * of 4 bytes, but it is really 16 bytes just like later revisions.
+ */
+ efx_writed(efx, &reg,
+ efx->type->evq_rptr_tbl_base +
+ FR_BZ_EVQ_RPTR_STEP * channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event)
+{
+ efx_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
+ efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
+{
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_farch_generate_event(channel->efx, channel->channel, &event);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct efx_tx_queue *tx_queue;
+ struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+
+ netif_tx_lock(efx->net_dev);
+ efx_farch_notify_tx_desc(tx_queue);
+ netif_tx_unlock(efx->net_dev);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
+
+/* Detect errors included in the rx_evt_pkt_ok bit. */
+static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+ 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+
+ /* The frame must be discarded if any of these are true. */
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm) ?
+ EFX_RX_PKT_DISCARD : 0;
+}
+
+/* Handle receive events that are not in order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
+efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
+}
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address.
+ * Also "is multicast" and "matches multicast filter" flags can be used to
+ * discard non-matching multicast packets.
+ */
+static void
+efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
+ u16 flags;
+ struct efx_rx_queue *rx_queue;
+ struct efx_nic *efx = channel->efx;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return;
+
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK then we can rely on the
+ * hardware checksum and classification.
+ */
+ flags = 0;
+ switch (rx_ev_hdr_type) {
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags |= EFX_RX_PKT_TCP;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags |= EFX_RX_PKT_CSUMMED;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ break;
+ }
+ } else {
+ flags = efx_farch_handle_rx_not_ok(rx_queue, event);
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ flags |= EFX_RX_PKT_DISCARD;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+}
+
+/* If this flush done event corresponds to a &struct efx_tx_queue, then
+ * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_tx_queue *tx_queue;
+ int qid;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+ qid % EFX_TXQ_TYPES);
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ efx_farch_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
+ }
+}
+
+/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ int qid;
+ bool failed;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (qid >= efx->n_channels)
+ return;
+ channel = efx_get_channel(efx, qid);
+ if (!efx_channel_has_rx_queue(channel))
+ return;
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (failed) {
+ netif_info(efx, hw, efx->net_dev,
+ "RXQ %d flush retry\n", qid);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ } else {
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+ }
+ atomic_dec(&efx->rxq_flush_outstanding);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void
+efx_farch_handle_drain_event(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ WARN_ON(atomic_read(&efx->active_queues) == 0);
+ atomic_dec(&efx->active_queues);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void efx_farch_handle_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue =
+ efx_channel_has_rx_queue(channel) ?
+ efx_channel_get_rx_queue(channel) : NULL;
+ unsigned magic, code;
+
+ magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ code = _EFX_CHANNEL_MAGIC_CODE(magic);
+
+ if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
+ channel->event_test_cpu = raw_smp_processor_id();
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here */
+ efx_fast_push_rx_descriptors(rx_queue);
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+ efx_farch_handle_drain_event(channel);
+ } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
+ efx_farch_handle_drain_event(channel);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+ }
+}
+
+static void
+efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_tx_flush_done(efx, event);
+ efx_sriov_tx_flush_done(efx, event);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_rx_flush_done(efx, event);
+ efx_sriov_rx_flush_done(efx, event);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx,
+ EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int efx_farch_ev_process(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ efx_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ efx_farch_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += efx_farch_handle_tx_event(channel,
+ &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ efx_farch_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ efx_farch_handle_driver_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_USER_EV:
+ efx_sriov_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Allocate buffer table entries for event queue */
+int efx_farch_ev_probe(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return efx_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(efx_qword_t));
+}
+
+int efx_farch_ev_init(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ }
+
+ /* Pin event queue buffer */
+ efx_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ /* Push event queue to card */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ return 0;
+}
+
+void efx_farch_ev_fini(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EFX_ZERO_OWORD(reg);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+ /* Unpin event queue */
+ efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_farch_ev_remove(struct efx_channel *channel)
+{
+ efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+void efx_farch_ev_test_generate(struct efx_channel *channel)
+{
+ efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+}
+
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_FILL(rx_queue));
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_farch_interrupts(struct efx_nic *efx,
+ bool enabled, bool force)
+{
+ efx_oword_t int_en_reg_ker;
+
+ EFX_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_farch_irq_enable_master(struct efx_nic *efx)
+{
+ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ efx_farch_interrupts(efx, true, false);
+}
+
+void efx_farch_irq_disable_master(struct efx_nic *efx)
+{
+ /* Disable interrupts */
+ efx_farch_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_farch_irq_test_generate(struct efx_nic *efx)
+{
+ efx_farch_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ efx_oword_t fatal_intr;
+ int error, mem_perr;
+
+ efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+	/* If this is a memory parity error, dump which blocks are offending */
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (efx_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ efx_farch_irq_disable_master(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen."
+ "NIC will be disabled\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+
+ /* Legacy interrupts are disabled too late by the EEH kernel
+ * code. Disable them earlier.
+ * If an EEH error occurred, the read will have returned all ones.
+ */
+ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+ !efx->eeh_disabled_legacy_irq) {
+ disable_irq_nosync(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = true;
+ }
+
+ /* Handle non-event-queue sources */
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ if (queues != 0) {
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+ result = IRQ_HANDLED;
+
+ } else {
+ efx_qword_t *event;
+
+ /* Legacy ISR read can return zero once (SF bug 15783) */
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel,
+ channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel_irq(channel);
+ else
+ efx_farch_ev_read_ack(channel);
+ }
+ }
+ }
+
+ if (result == IRQ_HANDLED)
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+	if (unlikely(!ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
+ /* Handle non-event-queue sources */
+ if (context->index == efx->irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+
+ return IRQ_HANDLED;
+}
+
+/* Setup RSS indirection table.
+ * This maps a packet's hash value to an RX queue.
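+ *
+ * Example (a typical default, configured elsewhere in the driver):
+ * rx_indir_table[i] = i % n_rx_channels spreads hash values round-robin
+ * across the RX queues.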
+ */
+void efx_farch_rx_push_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ efx_writed(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ }
+}
+
+/* Looks at available SRAM resources and works out how many queues we
+ * can support, and where things like descriptor caches should live.
+ *
+ * SRAM is split up as follows:
+ * 0 buftbl entries for channels
+ * efx->vf_buftbl_base buftbl entries for SR-IOV
+ * efx->rx_dc_base RX descriptor caches
+ * efx->tx_dc_base TX descriptor caches
+ */
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ efx->n_channels * EFX_MAX_EVQ_SIZE)
+ * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef CONFIG_SFC_SRIOV
+ if (efx_sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
+
+ efx->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
+ }
+#endif
+
+ efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+ efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx)
+{
+ efx_oword_t altera_build;
+ efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_farch_init_common(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EFX_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EFX_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * falcon_interrupts()).
+ *
+ * Note: All other fatal interrupts are enabled
+ */
+ EFX_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_INVERT_OWORD(temp);
+ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ efx_farch_rx_push_indir_table(efx);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/**************************************************************************
+ *
+ * Filter tables
+ *
+ **************************************************************************
+ */
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to the pipelined implementation, we need to program the H/W with a
+ * value larger than the hop limit we want.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
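+
+/* Example: if the deepest TCP_FULL filter sits at hop 4, the hardware
+ * search limit is programmed as 4 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL
+ * = 5; see efx_farch_filter_push_rx_config() below.
+ */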
+
+/* Hard maximum search limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_farch_filter_search() when the
+ * table is full.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
+
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
+
+enum efx_farch_filter_type {
+ EFX_FARCH_FILTER_TCP_FULL = 0,
+ EFX_FARCH_FILTER_TCP_WILD,
+ EFX_FARCH_FILTER_UDP_FULL,
+ EFX_FARCH_FILTER_UDP_WILD,
+ EFX_FARCH_FILTER_MAC_FULL = 4,
+ EFX_FARCH_FILTER_MAC_WILD,
+ EFX_FARCH_FILTER_UC_DEF = 8,
+ EFX_FARCH_FILTER_MC_DEF,
+ EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
+};
+
+enum efx_farch_filter_table_id {
+ EFX_FARCH_FILTER_TABLE_RX_IP = 0,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF,
+ EFX_FARCH_FILTER_TABLE_TX_MAC,
+ EFX_FARCH_FILTER_TABLE_COUNT,
+};
+
+enum efx_farch_filter_index {
+ EFX_FARCH_FILTER_INDEX_UC_DEF,
+ EFX_FARCH_FILTER_INDEX_MC_DEF,
+ EFX_FARCH_FILTER_SIZE_RX_DEF,
+};
+
+struct efx_farch_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+struct efx_farch_filter_table {
+ enum efx_farch_filter_table_id id;
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct efx_farch_filter_spec *spec;
+ unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
+};
+
+struct efx_farch_filter_state {
+ struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
+};
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx);
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 efx_farch_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 efx_farch_filter_increment(u32 key)
+{
+ return key * 2 - 1;
+}
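+
+/* Because the increment is always odd and the hash-searched tables
+ * have power-of-two sizes, stepping by it modulo the table size visits
+ * every slot exactly once before repeating, so a bounded search never
+ * revisits the same subset of entries.
+ */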
+
+static enum efx_farch_filter_table_id
+efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
+{
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
+ EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
+ return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
+}
+
+static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter_ctl;
+
+ efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
+ }
+
+ efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t tx_cfg;
+
+ efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
+}
+
+static int
+efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
+ const struct efx_filter_spec *gen_spec)
+{
+ bool is_full = false;
+
+ if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
+ return -EINVAL;
+
+ spec->priority = gen_spec->priority;
+ spec->flags = gen_spec->flags;
+ spec->dmaq_id = gen_spec->dmaq_id;
+
+ switch (gen_spec->match_flags) {
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
+ is_full = true;
+ /* fall through */
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
+ __be32 rhost, host1, host2;
+ __be16 rport, port1, port2;
+
+ EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
+
+ if (gen_spec->ether_type != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+ if (gen_spec->loc_port == 0 ||
+ (is_full && gen_spec->rem_port == 0))
+ return -EADDRNOTAVAIL;
+ switch (gen_spec->ip_proto) {
+ case IPPROTO_TCP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
+ EFX_FARCH_FILTER_TCP_WILD);
+ break;
+ case IPPROTO_UDP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
+ EFX_FARCH_FILTER_UDP_WILD);
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ rhost = is_full ? gen_spec->rem_host[0] : 0;
+ rport = is_full ? gen_spec->rem_port : 0;
+ host1 = rhost;
+ host2 = gen_spec->loc_host[0];
+ if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
+ port1 = gen_spec->loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->loc_port;
+ }
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
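+		/* Layout note (illustrative): data[] now holds a 96-bit
+		 * little-endian value with port1 in bits 15:0, host1 in
+		 * bits 47:16, port2 in bits 63:48 and host2 in bits
+		 * 95:64.
+		 */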
+
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = true;
+ /* fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
+ EFX_FARCH_FILTER_MAC_WILD);
+ spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
+ spec->data[1] = (gen_spec->loc_mac[2] << 24 |
+ gen_spec->loc_mac[3] << 16 |
+ gen_spec->loc_mac[4] << 8 |
+ gen_spec->loc_mac[5]);
+ spec->data[2] = (gen_spec->loc_mac[0] << 8 |
+ gen_spec->loc_mac[1]);
+ break;
+
+ case EFX_FILTER_MATCH_LOC_MAC_IG:
+ spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
+ EFX_FARCH_FILTER_MC_DEF :
+ EFX_FARCH_FILTER_UC_DEF);
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ break;
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return 0;
+}
+
+static void
+efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
+ const struct efx_farch_filter_spec *spec)
+{
+ bool is_full = false;
+
+ /* *gen_spec should be completely initialised, to be consistent
+ * with efx_filter_init_{rx,tx}() and in case we want to copy
+ * it back to userland.
+ */
+ memset(gen_spec, 0, sizeof(*gen_spec));
+
+ gen_spec->priority = spec->priority;
+ gen_spec->flags = spec->flags;
+ gen_spec->dmaq_id = spec->dmaq_id;
+
+ switch (spec->type) {
+ case EFX_FARCH_FILTER_TCP_FULL:
+ case EFX_FARCH_FILTER_UDP_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_TCP_WILD:
+ case EFX_FARCH_FILTER_UDP_WILD: {
+ __be32 host1, host2;
+ __be16 port1, port2;
+
+ gen_spec->match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ if (is_full)
+ gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_REM_PORT);
+ gen_spec->ether_type = htons(ETH_P_IP);
+ gen_spec->ip_proto =
+ (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
+ spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
+ IPPROTO_TCP : IPPROTO_UDP;
+
+ host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ port1 = htons(spec->data[0]);
+ host2 = htonl(spec->data[2]);
+ port2 = htons(spec->data[1] >> 16);
+ if (spec->flags & EFX_FILTER_FLAG_TX) {
+ gen_spec->loc_host[0] = host1;
+ gen_spec->rem_host[0] = host2;
+ } else {
+ gen_spec->loc_host[0] = host2;
+ gen_spec->rem_host[0] = host1;
+ }
+ if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
+ (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
+ gen_spec->loc_port = port1;
+ gen_spec->rem_port = port2;
+ } else {
+ gen_spec->loc_port = port2;
+ gen_spec->rem_port = port1;
+ }
+
+ break;
+ }
+
+ case EFX_FARCH_FILTER_MAC_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_MAC_WILD:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
+ if (is_full)
+ gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ gen_spec->loc_mac[0] = spec->data[2] >> 8;
+ gen_spec->loc_mac[1] = spec->data[2];
+ gen_spec->loc_mac[2] = spec->data[1] >> 24;
+ gen_spec->loc_mac[3] = spec->data[1] >> 16;
+ gen_spec->loc_mac[4] = spec->data[1] >> 8;
+ gen_spec->loc_mac[5] = spec->data[1];
+ gen_spec->outer_vid = htons(spec->data[0]);
+ break;
+
+ case EFX_FARCH_FILTER_UC_DEF:
+ case EFX_FARCH_FILTER_MC_DEF:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
+ gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
+{
+ /* If there's only one channel then disable RSS for non VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ spec->priority = EFX_FILTER_PRI_REQUIRED;
+ spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
+ (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
+ spec->dmaq_id = 0;
+}
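+
+/* Note that filters carrying EFX_FILTER_FLAG_RX_STACK are never truly
+ * deleted: efx_farch_filter_remove() re-initialises them with this
+ * helper instead of clearing the table entry.
+ */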
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_farch_filter_build(efx_oword_t *filter,
+ struct efx_farch_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (efx_farch_filter_spec_table_id(spec)) {
+ case EFX_FARCH_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
+ spec->type == EFX_FARCH_FILTER_UDP_WILD);
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EFX_DWORD_2, spec->data[2],
+ EFX_DWORD_1, spec->data[1],
+ EFX_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_TX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
+ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild | spec->dmaq_id << 1;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
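+
+/* The key returned above is just the XOR of the three data words and
+ * the table-specific data3 bits, so equal specs always produce equal
+ * keys and hence identical probe sequences in the hash table.
+ */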
+
+static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
+ const struct efx_farch_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ if (left->flags & EFX_FILTER_FLAG_TX &&
+ left->dmaq_id != right->dmaq_id)
+ return false;
+
+ return true;
+}
+
+/*
+ * Construct/deconstruct external filter IDs. At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
+ *
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
+ */
+
+#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
+
+static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
+ [EFX_FARCH_FILTER_TCP_FULL] = 0,
+ [EFX_FARCH_FILTER_UDP_FULL] = 0,
+ [EFX_FARCH_FILTER_TCP_WILD] = 1,
+ [EFX_FARCH_FILTER_UDP_WILD] = 1,
+ [EFX_FARCH_FILTER_MAC_FULL] = 2,
+ [EFX_FARCH_FILTER_MAC_WILD] = 3,
+ [EFX_FARCH_FILTER_UC_DEF] = 4,
+ [EFX_FARCH_FILTER_MC_DEF] = 4,
+};
+
+/* TX ranges must line up with efx_farch_filter_make_id(), which adds
+ * EFX_FARCH_FILTER_MATCH_PRI_COUNT to a TX type's match priority.
+ */
+static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
+	EFX_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
+	EFX_FARCH_FILTER_TABLE_RX_IP,
+	EFX_FARCH_FILTER_TABLE_RX_MAC,
+	EFX_FARCH_FILTER_TABLE_RX_MAC,
+	EFX_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
+	EFX_FARCH_FILTER_TABLE_COUNT,	/* TX match pri 0; invalid */
+	EFX_FARCH_FILTER_TABLE_COUNT,	/* invalid */
+	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 2 */
+	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 3 */
+};
+
+#define EFX_FARCH_FILTER_INDEX_WIDTH 13
+#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32
+efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
+ unsigned int index)
+{
+ unsigned int range;
+
+ range = efx_farch_filter_type_match_pri[spec->type];
+ if (!(spec->flags & EFX_FILTER_FLAG_RX))
+ range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
+
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_farch_filter_table_id
+efx_farch_filter_id_table_id(u32 id)
+{
+ unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
+
+ if (range < ARRAY_SIZE(efx_farch_filter_range_table))
+ return efx_farch_filter_range_table[range];
+ else
+ return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
+}
+
+static inline unsigned int efx_farch_filter_id_index(u32 id)
+{
+ return id & EFX_FARCH_FILTER_INDEX_MASK;
+}
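+
+/* Worked example (illustrative): an RX TCP full-match filter at table
+ * index 5 has match priority 0 and so external ID (0 << 13) | 5 = 5,
+ * while an RX MAC wildcard filter at index 9 gets (3 << 13) | 9 =
+ * 0x6009.
+ */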
+
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
+ enum efx_farch_filter_table_id table_id;
+
+ do {
+ table_id = efx_farch_filter_range_table[range];
+ if (state->table[table_id].size != 0)
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH |
+ state->table[table_id].size;
+ } while (range--);
+
+ return 0;
+}
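+
+/* (For instance, when the RX default table is present its two entries
+ * form the highest-priority range, so the limit above is
+ * (4 << EFX_FARCH_FILTER_INDEX_WIDTH) | 2 = 0x8002.)
+ */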
+
+s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec,
+ bool replace_equal)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec spec;
+ efx_oword_t filter;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
+ int rc;
+
+ rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
+ if (rc)
+ return rc;
+
+ table = &state->table[efx_farch_filter_spec_table_id(&spec)];
+ if (table->size == 0)
+ return -EINVAL;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+		   "%s: type %d search_limit=%d\n", __func__, spec.type,
+ table->search_limit[spec.type]);
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
+ EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
+ rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
+ ins_index = rep_index;
+
+ spin_lock_bh(&efx->filter_lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_farch_filter_build(&filter, &spec);
+ unsigned int hash = efx_farch_filter_hash(key);
+ unsigned int incr = efx_farch_filter_increment(key);
+ unsigned int max_rep_depth = table->search_limit[spec.type];
+ unsigned int max_ins_depth =
+ spec.priority <= EFX_FILTER_PRI_HINT ?
+ EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
+ EFX_FARCH_FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_farch_filter_equal(&spec,
+ &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_farch_filter_spec *saved_spec =
+ &table->spec[rep_index];
+
+ if (spec.priority == saved_spec->priority && !replace_equal) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec.priority < saved_spec->priority &&
+ !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out;
+ }
+ if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ rc = 0;
+ goto out;
+ }
+ /* Retain the RX_STACK flag */
+ spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
+ ++table->used;
+ }
+ table->spec[ins_index] = spec;
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ if (table->search_limit[spec.type] < depth) {
+ table->search_limit[spec.type] = depth;
+ if (spec.flags & EFX_FILTER_FLAG_TX)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+
+ efx_writeo(efx, &filter,
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_farch_filter_table_clear_entry(efx, table,
+ rep_index);
+ }
+
+ netif_vdbg(efx, hw, efx->net_dev,
+		   "%s: filter type %d index %d rxq %u set\n",
+ __func__, spec.type, ins_index, spec.dmaq_id);
+ rc = efx_farch_filter_make_id(&spec, ins_index);
+
+out:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx)
+{
+ static efx_oword_t filter;
+
+ EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
+ BUG_ON(table->offset == 0); /* can't clear MAC default filters */
+
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ /* If this filter required a greater search depth than
+ * any other, the search limit for its type can now be
+ * decreased. However, it is hard to determine that
+ * unless the table has become completely empty - in
+ * which case, all its search limits can be set to 0.
+ */
+ if (unlikely(table->used == 0)) {
+ memset(table->search_limit, 0, sizeof(table->search_limit));
+ if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+}
+
+static int efx_farch_filter_remove(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
+
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ spec->priority > priority)
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ efx_farch_filter_table_clear_entry(efx, table, filter_idx);
+ }
+
+ return 0;
+}
+
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+
+ spin_lock_bh(&efx->filter_lock);
+ rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority) {
+ efx_farch_filter_to_gen_spec(spec_buf, spec);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear(struct efx_nic *efx,
+ enum efx_farch_filter_table_id table_id,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table = &state->table[table_id];
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
+ efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
+ priority);
+}
+
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_farch_filter_make_id(
+ &table->spec[filter_idx], filter_idx);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+/* Restore filter state after reset */
+void efx_farch_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+ efx_farch_filter_push_tx_limits(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
+
+int efx_farch_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state;
+ struct efx_farch_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
+ table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
+ }
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ /* RX default filters must always exist */
+ struct efx_farch_filter_spec *spec;
+ unsigned i;
+
+ for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
+ spec = &table->spec[i];
+ spec->type = EFX_FARCH_FILTER_UC_DEF + i;
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ __set_bit(i, table->used_bitmap);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ return 0;
+
+fail:
+ efx_farch_filter_table_remove(efx);
+ return -ENOMEM;
+}
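+
+/* The RX IP table alone has FR_BZ_RX_FILTER_TBL0_ROWS (8192) entries,
+ * so its spec array runs to over a hundred kilobytes - hence the
+ * vzalloc() rather than a kmalloc() variant above.
+ */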
+
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_farch_filter_push_rx_config() */
+ continue;
+
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec)
+{
+ return efx_farch_filter_insert(efx, gen_spec, true);
+}
+
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table =
+ &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
+ flow_id, index)) {
+ efx_farch_filter_table_clear_entry(efx, table, index);
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *ha;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ netif_addr_lock_bh(net_dev);
+
+ efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ __set_bit_le(bit, mc_hash);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ __set_bit_le(0xff, mc_hash);
+ }
+
+ netif_addr_unlock_bh(net_dev);
+}
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/farch_regs.h
index ade4c4dc56ca..7019a712e799 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/farch_regs.h
@@ -1,15 +1,15 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#ifndef EFX_REGS_H
-#define EFX_REGS_H
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
/*
* Falcon hardware architecture definitions have a name prefix following
@@ -2925,264 +2925,8 @@
#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- *
- */
-
-#define GRxGoodOct_offset 0x0
-#define GRxGoodOct_WIDTH 48
-#define GRxBadOct_offset 0x8
-#define GRxBadOct_WIDTH 48
-#define GRxMissPkt_offset 0x10
-#define GRxMissPkt_WIDTH 32
-#define GRxFalseCRS_offset 0x14
-#define GRxFalseCRS_WIDTH 32
-#define GRxPausePkt_offset 0x18
-#define GRxPausePkt_WIDTH 32
-#define GRxBadPkt_offset 0x1C
-#define GRxBadPkt_WIDTH 32
-#define GRxUcastPkt_offset 0x20
-#define GRxUcastPkt_WIDTH 32
-#define GRxMcastPkt_offset 0x24
-#define GRxMcastPkt_WIDTH 32
-#define GRxBcastPkt_offset 0x28
-#define GRxBcastPkt_WIDTH 32
-#define GRxGoodLt64Pkt_offset 0x2C
-#define GRxGoodLt64Pkt_WIDTH 32
-#define GRxBadLt64Pkt_offset 0x30
-#define GRxBadLt64Pkt_WIDTH 32
-#define GRx64Pkt_offset 0x34
-#define GRx64Pkt_WIDTH 32
-#define GRx65to127Pkt_offset 0x38
-#define GRx65to127Pkt_WIDTH 32
-#define GRx128to255Pkt_offset 0x3C
-#define GRx128to255Pkt_WIDTH 32
-#define GRx256to511Pkt_offset 0x40
-#define GRx256to511Pkt_WIDTH 32
-#define GRx512to1023Pkt_offset 0x44
-#define GRx512to1023Pkt_WIDTH 32
-#define GRx1024to15xxPkt_offset 0x48
-#define GRx1024to15xxPkt_WIDTH 32
-#define GRx15xxtoJumboPkt_offset 0x4C
-#define GRx15xxtoJumboPkt_WIDTH 32
-#define GRxGtJumboPkt_offset 0x50
-#define GRxGtJumboPkt_WIDTH 32
-#define GRxFcsErr64to15xxPkt_offset 0x54
-#define GRxFcsErr64to15xxPkt_WIDTH 32
-#define GRxFcsErr15xxtoJumboPkt_offset 0x58
-#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
-#define GRxFcsErrGtJumboPkt_offset 0x5C
-#define GRxFcsErrGtJumboPkt_WIDTH 32
-#define GTxGoodBadOct_offset 0x80
-#define GTxGoodBadOct_WIDTH 48
-#define GTxGoodOct_offset 0x88
-#define GTxGoodOct_WIDTH 48
-#define GTxSglColPkt_offset 0x90
-#define GTxSglColPkt_WIDTH 32
-#define GTxMultColPkt_offset 0x94
-#define GTxMultColPkt_WIDTH 32
-#define GTxExColPkt_offset 0x98
-#define GTxExColPkt_WIDTH 32
-#define GTxDefPkt_offset 0x9C
-#define GTxDefPkt_WIDTH 32
-#define GTxLateCol_offset 0xA0
-#define GTxLateCol_WIDTH 32
-#define GTxExDefPkt_offset 0xA4
-#define GTxExDefPkt_WIDTH 32
-#define GTxPausePkt_offset 0xA8
-#define GTxPausePkt_WIDTH 32
-#define GTxBadPkt_offset 0xAC
-#define GTxBadPkt_WIDTH 32
-#define GTxUcastPkt_offset 0xB0
-#define GTxUcastPkt_WIDTH 32
-#define GTxMcastPkt_offset 0xB4
-#define GTxMcastPkt_WIDTH 32
-#define GTxBcastPkt_offset 0xB8
-#define GTxBcastPkt_WIDTH 32
-#define GTxLt64Pkt_offset 0xBC
-#define GTxLt64Pkt_WIDTH 32
-#define GTx64Pkt_offset 0xC0
-#define GTx64Pkt_WIDTH 32
-#define GTx65to127Pkt_offset 0xC4
-#define GTx65to127Pkt_WIDTH 32
-#define GTx128to255Pkt_offset 0xC8
-#define GTx128to255Pkt_WIDTH 32
-#define GTx256to511Pkt_offset 0xCC
-#define GTx256to511Pkt_WIDTH 32
-#define GTx512to1023Pkt_offset 0xD0
-#define GTx512to1023Pkt_WIDTH 32
-#define GTx1024to15xxPkt_offset 0xD4
-#define GTx1024to15xxPkt_WIDTH 32
-#define GTx15xxtoJumboPkt_offset 0xD8
-#define GTx15xxtoJumboPkt_WIDTH 32
-#define GTxGtJumboPkt_offset 0xDC
-#define GTxGtJumboPkt_WIDTH 32
-#define GTxNonTcpUdpPkt_offset 0xE0
-#define GTxNonTcpUdpPkt_WIDTH 16
-#define GTxMacSrcErrPkt_offset 0xE4
-#define GTxMacSrcErrPkt_WIDTH 16
-#define GTxIpSrcErrPkt_offset 0xE8
-#define GTxIpSrcErrPkt_WIDTH 16
-#define GDmaDone_offset 0xEC
-#define GDmaDone_WIDTH 32
-
-#define XgRxOctets_offset 0x0
-#define XgRxOctets_WIDTH 48
-#define XgRxOctetsOK_offset 0x8
-#define XgRxOctetsOK_WIDTH 48
-#define XgRxPkts_offset 0x10
-#define XgRxPkts_WIDTH 32
-#define XgRxPktsOK_offset 0x14
-#define XgRxPktsOK_WIDTH 32
-#define XgRxBroadcastPkts_offset 0x18
-#define XgRxBroadcastPkts_WIDTH 32
-#define XgRxMulticastPkts_offset 0x1C
-#define XgRxMulticastPkts_WIDTH 32
-#define XgRxUnicastPkts_offset 0x20
-#define XgRxUnicastPkts_WIDTH 32
-#define XgRxUndersizePkts_offset 0x24
-#define XgRxUndersizePkts_WIDTH 32
-#define XgRxOversizePkts_offset 0x28
-#define XgRxOversizePkts_WIDTH 32
-#define XgRxJabberPkts_offset 0x2C
-#define XgRxJabberPkts_WIDTH 32
-#define XgRxUndersizeFCSerrorPkts_offset 0x30
-#define XgRxUndersizeFCSerrorPkts_WIDTH 32
-#define XgRxDropEvents_offset 0x34
-#define XgRxDropEvents_WIDTH 32
-#define XgRxFCSerrorPkts_offset 0x38
-#define XgRxFCSerrorPkts_WIDTH 32
-#define XgRxAlignError_offset 0x3C
-#define XgRxAlignError_WIDTH 32
-#define XgRxSymbolError_offset 0x40
-#define XgRxSymbolError_WIDTH 32
-#define XgRxInternalMACError_offset 0x44
-#define XgRxInternalMACError_WIDTH 32
-#define XgRxControlPkts_offset 0x48
-#define XgRxControlPkts_WIDTH 32
-#define XgRxPausePkts_offset 0x4C
-#define XgRxPausePkts_WIDTH 32
-#define XgRxPkts64Octets_offset 0x50
-#define XgRxPkts64Octets_WIDTH 32
-#define XgRxPkts65to127Octets_offset 0x54
-#define XgRxPkts65to127Octets_WIDTH 32
-#define XgRxPkts128to255Octets_offset 0x58
-#define XgRxPkts128to255Octets_WIDTH 32
-#define XgRxPkts256to511Octets_offset 0x5C
-#define XgRxPkts256to511Octets_WIDTH 32
-#define XgRxPkts512to1023Octets_offset 0x60
-#define XgRxPkts512to1023Octets_WIDTH 32
-#define XgRxPkts1024to15xxOctets_offset 0x64
-#define XgRxPkts1024to15xxOctets_WIDTH 32
-#define XgRxPkts15xxtoMaxOctets_offset 0x68
-#define XgRxPkts15xxtoMaxOctets_WIDTH 32
-#define XgRxLengthError_offset 0x6C
-#define XgRxLengthError_WIDTH 32
-#define XgTxPkts_offset 0x80
-#define XgTxPkts_WIDTH 32
-#define XgTxOctets_offset 0x88
-#define XgTxOctets_WIDTH 48
-#define XgTxMulticastPkts_offset 0x90
-#define XgTxMulticastPkts_WIDTH 32
-#define XgTxBroadcastPkts_offset 0x94
-#define XgTxBroadcastPkts_WIDTH 32
-#define XgTxUnicastPkts_offset 0x98
-#define XgTxUnicastPkts_WIDTH 32
-#define XgTxControlPkts_offset 0x9C
-#define XgTxControlPkts_WIDTH 32
-#define XgTxPausePkts_offset 0xA0
-#define XgTxPausePkts_WIDTH 32
-#define XgTxPkts64Octets_offset 0xA4
-#define XgTxPkts64Octets_WIDTH 32
-#define XgTxPkts65to127Octets_offset 0xA8
-#define XgTxPkts65to127Octets_WIDTH 32
-#define XgTxPkts128to255Octets_offset 0xAC
-#define XgTxPkts128to255Octets_WIDTH 32
-#define XgTxPkts256to511Octets_offset 0xB0
-#define XgTxPkts256to511Octets_WIDTH 32
-#define XgTxPkts512to1023Octets_offset 0xB4
-#define XgTxPkts512to1023Octets_WIDTH 32
-#define XgTxPkts1024to15xxOctets_offset 0xB8
-#define XgTxPkts1024to15xxOctets_WIDTH 32
-#define XgTxPkts1519toMaxOctets_offset 0xBC
-#define XgTxPkts1519toMaxOctets_WIDTH 32
-#define XgTxUndersizePkts_offset 0xC0
-#define XgTxUndersizePkts_WIDTH 32
-#define XgTxOversizePkts_offset 0xC4
-#define XgTxOversizePkts_WIDTH 32
-#define XgTxNonTcpUdpPkt_offset 0xC8
-#define XgTxNonTcpUdpPkt_WIDTH 16
-#define XgTxMacSrcErrPkt_offset 0xCC
-#define XgTxMacSrcErrPkt_WIDTH 16
-#define XgTxIpSrcErrPkt_offset 0xD0
-#define XgTxIpSrcErrPkt_WIDTH 16
-#define XgDmaDone_offset 0xD4
-#define XgDmaDone_WIDTH 32
-
-#define FALCON_STATS_NOT_DONE 0x00000000
-#define FALCON_STATS_DONE 0xffffffff
-
-/**************************************************************************
- *
- * Falcon non-volatile configuration
- *
- **************************************************************************
- */
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
-/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
-struct falcon_nvconfig_board_v2 {
- __le16 nports;
- u8 port0_phy_addr;
- u8 port0_phy_type;
- u8 port1_phy_addr;
- u8 port1_phy_type;
- __le16 asic_sub_revision;
- __le16 board_revision;
-} __packed;
-
-/* Board configuration v3 extra information */
-struct falcon_nvconfig_board_v3 {
- __le32 spi_device_type[2];
-} __packed;
-
-/* Bit numbers for spi_device_type */
-#define SPI_DEV_TYPE_SIZE_LBN 0
-#define SPI_DEV_TYPE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
-#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
-#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
-#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
-#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
-#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
-#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_FIELD(type, field) \
- (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
-
-#define FALCON_NVCONFIG_OFFSET 0x300
-
-#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
-struct falcon_nvconfig {
- efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
- u8 mac_address[2][8]; /* 0x310 */
- efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
- efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
- efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
- efx_oword_t hw_init_reg; /* 0x350 */
- efx_oword_t nic_stat_reg; /* 0x360 */
- efx_oword_t glb_ctl_reg; /* 0x370 */
- efx_oword_t srm_cfg_reg; /* 0x380 */
- efx_oword_t spare_reg; /* 0x390 */
- __le16 board_magic_num; /* 0x3A0 */
- __le16 board_struct_ver;
- __le16 board_checksum;
- struct falcon_nvconfig_board_v2 board_v2;
- efx_oword_t ee_base_page_reg; /* 0x3B0 */
- struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
-} __packed;
-
-#endif /* EFX_REGS_H */
+#endif /* EFX_FARCH_REGS_H */
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
deleted file mode 100644
index 30d744235d27..000000000000
--- a/drivers/net/ethernet/sfc/filter.c
+++ /dev/null
@@ -1,1274 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/in.h>
-#include <net/ip.h>
-#include "efx.h"
-#include "filter.h"
-#include "io.h"
-#include "nic.h"
-#include "regs.h"
-
-/* "Fudge factors" - difference between programmed value and actual depth.
- * Due to pipelined implementation we need to program H/W with a value that
- * is larger than the hop limit we want.
- */
-#define FILTER_CTL_SRCH_FUDGE_WILD 3
-#define FILTER_CTL_SRCH_FUDGE_FULL 1
-
-/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
- * We also need to avoid infinite loops in efx_filter_search() when the
- * table is full.
- */
-#define FILTER_CTL_SRCH_MAX 200
-
-/* Don't try very hard to find space for performance hints, as this is
- * counter-productive. */
-#define FILTER_CTL_SRCH_HINT_MAX 5
-
-enum efx_filter_table_id {
- EFX_FILTER_TABLE_RX_IP = 0,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF,
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_COUNT,
-};
-
-enum efx_filter_index {
- EFX_FILTER_INDEX_UC_DEF,
- EFX_FILTER_INDEX_MC_DEF,
- EFX_FILTER_SIZE_RX_DEF,
-};
-
-struct efx_filter_table {
- enum efx_filter_table_id id;
- u32 offset; /* address of table relative to BAR */
- unsigned size; /* number of entries */
- unsigned step; /* step between entries */
- unsigned used; /* number currently used */
- unsigned long *used_bitmap;
- struct efx_filter_spec *spec;
- unsigned search_depth[EFX_FILTER_TYPE_COUNT];
-};
-
-struct efx_filter_state {
- spinlock_t lock;
- struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
-#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
- unsigned rps_expire_index;
-#endif
-};
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx);
-
-/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
- * key derived from the n-tuple. The initial LFSR state is 0xffff. */
-static u16 efx_filter_hash(u32 key)
-{
- u16 tmp;
-
- /* First 16 rounds */
- tmp = 0x1fff ^ key >> 16;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- tmp = tmp ^ tmp >> 9;
- /* Last 16 rounds */
- tmp = tmp ^ tmp << 13 ^ key;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- return tmp ^ tmp >> 9;
-}
-
-/* To allow for hash collisions, filter search continues at these
- * increments from the first possible entry selected by the hash. */
-static u16 efx_filter_increment(u32 key)
-{
- return key * 2 - 1;
-}
-
-static enum efx_filter_table_id
-efx_filter_spec_table_id(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
- EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
- return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
-}
-
-static struct efx_filter_table *
-efx_filter_spec_table(struct efx_filter_state *state,
- const struct efx_filter_spec *spec)
-{
- if (spec->type == EFX_FILTER_UNSPEC)
- return NULL;
- else
- return &state->table[efx_filter_spec_table_id(spec)];
-}
-
-static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
-{
- memset(table->search_depth, 0, sizeof(table->search_depth));
-}
-
-static void efx_filter_push_rx_config(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t filter_ctl;
-
- efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
-
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
-
- /* There is a single bit to enable RX scatter for all
- * unmatched packets. Only set it if scatter is
- * enabled in both filter specs.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_SCATTER));
- } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* We don't expose 'default' filters because unmatched
- * packets always go to the queue number found in the
- * RSS table. But we still need to set the RX scatter
- * bit here.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- efx->rx_scatter);
- }
-
- efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-}
-
-static void efx_filter_push_tx_limits(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t tx_cfg;
-
- efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
-}
-
-static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
- __be32 host1, __be16 port1,
- __be32 host2, __be16 port2)
-{
- spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
- spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
- spec->data[2] = ntohl(host2);
-}
-
-static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
- __be32 *host1, __be16 *port1,
- __be32 *host2, __be16 *port2)
-{
- *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
- *port1 = htons(spec->data[0]);
- *host2 = htonl(spec->data[2]);
- *port2 = htons(spec->data[1] >> 16);
-}
-
-/**
- * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- */
-int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port)
-{
- __be32 host1;
- __be16 port1;
-
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_WILD;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_WILD;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- /* Filter is constructed in terms of source and destination,
- * with the odd wrinkle that the ports are swapped in a UDP
- * wildcard filter. We need to convert from local and remote
- * (= zero for wildcard) addresses.
- */
- host1 = 0;
- if (proto != IPPROTO_UDP) {
- port1 = 0;
- } else {
- port1 = port;
- port = 0;
- }
-
- __efx_filter_set_ipv4(spec, host1, port1, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port)
-{
- __be32 host1;
- __be16 port1;
-
- switch (spec->type) {
- case EFX_FILTER_TCP_WILD:
- *proto = IPPROTO_TCP;
- __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
- return 0;
- case EFX_FILTER_UDP_WILD:
- *proto = IPPROTO_UDP;
- __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- * @rhost: Remote host address (network byte order)
- * @rport: Remote port (network byte order)
- */
-int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0 || rport == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_FULL;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_FULL;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- __efx_filter_set_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport)
-{
- switch (spec->type) {
- case EFX_FILTER_TCP_FULL:
- *proto = IPPROTO_TCP;
- break;
- case EFX_FILTER_UDP_FULL:
- *proto = IPPROTO_UDP;
- break;
- default:
- return -EINVAL;
- }
-
- __efx_filter_get_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-/**
- * efx_filter_set_eth_local - specify local Ethernet address and optional VID
- * @spec: Specification to initialise
- * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
- * @addr: Local Ethernet MAC address
- */
-int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (vid == EFX_FILTER_VID_UNSPEC) {
- spec->type = EFX_FILTER_MAC_WILD;
- spec->data[0] = 0;
- } else {
- spec->type = EFX_FILTER_MAC_FULL;
- spec->data[0] = vid;
- }
-
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
- return 0;
-}
-
-/**
- * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_uc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_UC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-/**
- * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_mc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_MC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- struct efx_filter_spec *spec = &table->spec[filter_idx];
- enum efx_filter_flags flags = 0;
-
- /* If there's only one channel then disable RSS for non VF
- * traffic, thereby allowing VFs to use RSS when the PF can't.
- */
- if (efx->n_rx_channels > 1)
- flags |= EFX_FILTER_FLAG_RX_RSS;
-
- if (efx->rx_scatter)
- flags |= EFX_FILTER_FLAG_RX_SCATTER;
-
- efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
- spec->type = EFX_FILTER_UC_DEF + filter_idx;
- table->used_bitmap[0] |= 1 << filter_idx;
-}
-
-int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr)
-{
- switch (spec->type) {
- case EFX_FILTER_MAC_WILD:
- *vid = EFX_FILTER_VID_UNSPEC;
- break;
- case EFX_FILTER_MAC_FULL:
- *vid = spec->data[0];
- break;
- default:
- return -EINVAL;
- }
-
- addr[0] = spec->data[2] >> 8;
- addr[1] = spec->data[2];
- addr[2] = spec->data[1] >> 24;
- addr[3] = spec->data[1] >> 16;
- addr[4] = spec->data[1] >> 8;
- addr[5] = spec->data[1];
- return 0;
-}
-
-/* Build a filter entry and return its n-tuple key. */
-static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
-{
- u32 data3;
-
- switch (efx_filter_spec_table_id(spec)) {
- case EFX_FILTER_TABLE_RX_IP: {
- bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
- spec->type == EFX_FILTER_UDP_WILD);
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_BZ_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_BZ_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_BZ_TCP_UDP, is_udp,
- FRF_BZ_RXQ_ID, spec->dmaq_id,
- EFX_DWORD_2, spec->data[2],
- EFX_DWORD_1, spec->data[1],
- EFX_DWORD_0, spec->data[0]);
- data3 = is_udp;
- break;
- }
-
- case EFX_FILTER_TABLE_RX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_CZ_RMFT_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_CZ_RMFT_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
- FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
- FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
- FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild;
- break;
- }
-
- case EFX_FILTER_TABLE_TX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_5(*filter,
- FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
- FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
- FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
- FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild | spec->dmaq_id << 1;
- break;
- }
-
- default:
- BUG();
- }
-
- return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
-}
-
-static bool efx_filter_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if (left->type != right->type ||
- memcmp(left->data, right->data, sizeof(left->data)))
- return false;
-
- if (left->flags & EFX_FILTER_FLAG_TX &&
- left->dmaq_id != right->dmaq_id)
- return false;
-
- return true;
-}
-
-/*
- * Construct/deconstruct external filter IDs. At least the RX filter
- * IDs must be ordered by matching priority, for RX NFC semantics.
- *
- * Deconstruction needs to be robust against invalid IDs so that
- * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
- * accept user-provided IDs.
- */
-
-#define EFX_FILTER_MATCH_PRI_COUNT 5
-
-static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
- [EFX_FILTER_TCP_FULL] = 0,
- [EFX_FILTER_UDP_FULL] = 0,
- [EFX_FILTER_TCP_WILD] = 1,
- [EFX_FILTER_UDP_WILD] = 1,
- [EFX_FILTER_MAC_FULL] = 2,
- [EFX_FILTER_MAC_WILD] = 3,
- [EFX_FILTER_UC_DEF] = 4,
- [EFX_FILTER_MC_DEF] = 4,
-};
-
-static const enum efx_filter_table_id efx_filter_range_table[] = {
- EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
- EFX_FILTER_TABLE_RX_IP,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
- EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
- EFX_FILTER_TABLE_COUNT, /* invalid */
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
-};
-
-#define EFX_FILTER_INDEX_WIDTH 13
-#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
-
-static inline u32
-efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
-{
- unsigned int range;
-
- range = efx_filter_type_match_pri[spec->type];
- if (!(spec->flags & EFX_FILTER_FLAG_RX))
- range += EFX_FILTER_MATCH_PRI_COUNT;
-
- return range << EFX_FILTER_INDEX_WIDTH | index;
-}
-
-static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < ARRAY_SIZE(efx_filter_range_table))
- return efx_filter_range_table[range];
- else
- return EFX_FILTER_TABLE_COUNT; /* invalid */
-}
-
-static inline unsigned int efx_filter_id_index(u32 id)
-{
- return id & EFX_FILTER_INDEX_MASK;
-}
-
-static inline u8 efx_filter_id_flags(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < EFX_FILTER_MATCH_PRI_COUNT)
- return EFX_FILTER_FLAG_RX;
- else
- return EFX_FILTER_FLAG_TX;
-}
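
The three helpers above pack each user-visible filter ID as (match-priority range << 13) | table index, with TX ranges offset by EFX_FILTER_MATCH_PRI_COUNT so that RX IDs sort below TX IDs. A minimal standalone sketch of the same packing, using illustrative values rather than anything read from hardware:

	#include <stdio.h>

	#define EFX_FILTER_INDEX_WIDTH	13
	#define EFX_FILTER_INDEX_MASK	((1 << EFX_FILTER_INDEX_WIDTH) - 1)
	#define EFX_FILTER_MATCH_PRI_COUNT 5

	int main(void)
	{
		unsigned int range = 2;		/* e.g. RX MAC_FULL match priority */
		unsigned int index = 0x123;	/* illustrative table slot */
		unsigned int id = range << EFX_FILTER_INDEX_WIDTH | index;

		printf("id=%#x range=%u index=%#x is_rx=%d\n", id,
		       id >> EFX_FILTER_INDEX_WIDTH, id & EFX_FILTER_INDEX_MASK,
		       (id >> EFX_FILTER_INDEX_WIDTH) < EFX_FILTER_MATCH_PRI_COUNT);
		return 0;
	}
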
-
-u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
- enum efx_filter_table_id table_id;
-
- do {
- table_id = efx_filter_range_table[range];
- if (state->table[table_id].size != 0)
- return range << EFX_FILTER_INDEX_WIDTH |
- state->table[table_id].size;
- } while (range--);
-
- return 0;
-}
-
-/**
- * efx_filter_insert_filter - add or replace a filter
- * @efx: NIC in which to insert the filter
- * @spec: Specification for the filter
- * @replace_equal: Flag for whether the specified filter may replace an
- * existing filter with equal priority
- *
- * On success, return the filter ID.
- * On failure, return a negative error code.
- *
- * If an existing filter has equal match values to the new filter
- * spec, then the new filter might replace it, depending on the
- * relative priorities. If the existing filter has lower priority, or
- * if @replace_equal is set and it has equal priority, then it is
- * replaced. Otherwise the function fails, returning -%EPERM if
- * the existing filter has higher priority or -%EEXIST if it has
- * equal priority.
- */
-s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace_equal)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- efx_oword_t filter;
- int rep_index, ins_index;
- unsigned int depth = 0;
- int rc;
-
- if (!table || table->size == 0)
- return -EINVAL;
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: type %d search_depth=%d", __func__, spec->type,
- table->search_depth[spec->type]);
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* One filter spec per type */
- BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
- BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
- EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- rep_index = spec->type - EFX_FILTER_UC_DEF;
- ins_index = rep_index;
-
- spin_lock_bh(&state->lock);
- } else {
- /* Search concurrently for
- * (1) a filter to be replaced (rep_index): any filter
- * with the same match values, up to the current
- * search depth for this type, and
- * (2) the insertion point (ins_index): (1) or any
- * free slot before it or up to the maximum search
- * depth for this priority
- * We fail if we cannot find (2).
- *
- * We can stop once either
- * (a) we find (1), in which case we have definitely
- * found (2) as well; or
- * (b) we have searched exhaustively for (1), and have
- * either found (2) or searched exhaustively for it
- */
- u32 key = efx_filter_build(&filter, spec);
- unsigned int hash = efx_filter_hash(key);
- unsigned int incr = efx_filter_increment(key);
- unsigned int max_rep_depth = table->search_depth[spec->type];
- unsigned int max_ins_depth =
- spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
- unsigned int i = hash & (table->size - 1);
-
- ins_index = -1;
- depth = 1;
-
- spin_lock_bh(&state->lock);
-
- for (;;) {
- if (!test_bit(i, table->used_bitmap)) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_filter_equal(spec, &table->spec[i])) {
- /* Case (a) */
- if (ins_index < 0)
- ins_index = i;
- rep_index = i;
- break;
- }
-
- if (depth >= max_rep_depth &&
- (ins_index >= 0 || depth >= max_ins_depth)) {
- /* Case (b) */
- if (ins_index < 0) {
- rc = -EBUSY;
- goto out;
- }
- rep_index = -1;
- break;
- }
-
- i = (i + incr) & (table->size - 1);
- ++depth;
- }
- }
-
- /* If we found a filter to be replaced, check whether we
- * should do so
- */
- if (rep_index >= 0) {
- struct efx_filter_spec *saved_spec = &table->spec[rep_index];
-
- if (spec->priority == saved_spec->priority && !replace_equal) {
- rc = -EEXIST;
- goto out;
- }
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto out;
- }
- }
-
- /* Insert the filter */
- if (ins_index != rep_index) {
- __set_bit(ins_index, table->used_bitmap);
- ++table->used;
- }
- table->spec[ins_index] = *spec;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- efx_filter_push_rx_config(efx);
- } else {
- if (table->search_depth[spec->type] < depth) {
- table->search_depth[spec->type] = depth;
- if (spec->flags & EFX_FILTER_FLAG_TX)
- efx_filter_push_tx_limits(efx);
- else
- efx_filter_push_rx_config(efx);
- }
-
- efx_writeo(efx, &filter,
- table->offset + table->step * ins_index);
-
- /* If we were able to replace a filter by inserting
- * at a lower depth, clear the replaced filter
- */
- if (ins_index != rep_index && rep_index >= 0)
- efx_filter_table_clear_entry(efx, table, rep_index);
- }
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: filter type %d index %d rxq %u set",
- __func__, spec->type, ins_index, spec->dmaq_id);
- rc = efx_filter_make_id(spec, ins_index);
-
-out:
- spin_unlock_bh(&state->lock);
- return rc;
-}
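
The search loop above is open addressing with double hashing: the n-tuple key yields a start bucket and a stride, and the walk stops on an equal filter (case (a)) or once the relevant search depths are exhausted (case (b)). A simplified standalone sketch of the same two-target probe, collapsing the separate replace/insert depths into one and assuming a power-of-two table with an odd stride:

	/* Find a slot holding an equal key (rep) and/or the first free
	 * slot (ins) within max_depth probes.  `size` must be a power of
	 * two and `step` odd, so the probe can visit every bucket.
	 */
	struct slot {
		int used;
		unsigned int key;
	};

	static int probe(const struct slot *table, unsigned int size,
			 unsigned int hash, unsigned int step, unsigned int key,
			 unsigned int max_depth, int *rep_index)
	{
		unsigned int i = hash & (size - 1);
		unsigned int depth;
		int ins_index = -1;

		*rep_index = -1;
		for (depth = 1; depth <= max_depth; depth++) {
			if (!table[i].used) {
				if (ins_index < 0)
					ins_index = i;	/* first free slot */
			} else if (table[i].key == key) {
				if (ins_index < 0)
					ins_index = i;	/* replace in place */
				*rep_index = i;
				break;			/* case (a) above */
			}
			i = (i + step) & (size - 1);
		}
		return ins_index;	/* -1: no slot within depth (-EBUSY) */
	}
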
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx)
-{
- static efx_oword_t filter;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* RX default filters must always exist */
- efx_filter_reset_rx_def(efx, filter_idx);
- efx_filter_push_rx_config(efx);
- } else if (test_bit(filter_idx, table->used_bitmap)) {
- __clear_bit(filter_idx, table->used_bitmap);
- --table->used;
- memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
-
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
-}
-
-/**
- * efx_filter_remove_id_safe - remove a filter by ID, carefully
- * @efx: NIC from which to remove the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- struct efx_filter_spec *spec;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-/**
- * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
- * @efx: NIC from which to retrieve the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- * @spec: Buffer in which to store filter specification
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *spec_buf)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- *spec_buf = *spec;
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-static void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[table_id];
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- if (table->spec[filter_idx].priority <= priority)
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
-}
-
-/**
- * efx_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
-{
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
-}
-
-u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- u32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority)
- ++count;
- }
- }
-
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- s32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority) {
- if (count == size) {
- count = -EMSGSIZE;
- goto out;
- }
- buf[count++] = efx_filter_make_id(
- &table->spec[filter_idx], filter_idx);
- }
- }
- }
-out:
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-/* Restore filter state after reset */
-void efx_restore_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
-
- /* Check whether this is a regular register table */
- if (table->step == 0)
- continue;
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap))
- continue;
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
- efx_filter_push_tx_limits(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-int efx_probe_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state;
- struct efx_filter_table *table;
- unsigned table_id;
-
- state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
- efx->filter_state = state;
-
- spin_lock_init(&state->lock);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-#ifdef CONFIG_RFS_ACCEL
- state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
- sizeof(*state->rps_flow_id),
- GFP_KERNEL);
- if (!state->rps_flow_id)
- goto fail;
-#endif
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- table->id = EFX_FILTER_TABLE_RX_IP;
- table->offset = FR_BZ_RX_FILTER_TBL0;
- table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
- table->step = FR_BZ_RX_FILTER_TBL0_STEP;
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- table->id = EFX_FILTER_TABLE_RX_MAC;
- table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
- table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- table->id = EFX_FILTER_TABLE_RX_DEF;
- table->size = EFX_FILTER_SIZE_RX_DEF;
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- table->id = EFX_FILTER_TABLE_TX_MAC;
- table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
- table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
- }
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
- if (table->size == 0)
- continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!table->used_bitmap)
- goto fail;
- table->spec = vzalloc(table->size * sizeof(*table->spec));
- if (!table->spec)
- goto fail;
- }
-
- if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
- /* RX default filters must always exist */
- unsigned i;
- for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
- efx_filter_reset_rx_def(efx, i);
- }
-
- efx_filter_push_rx_config(efx);
-
- return 0;
-
-fail:
- efx_remove_filters(efx);
- return -ENOMEM;
-}
-
-void efx_remove_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
- vfree(state->table[table_id].spec);
- }
-#ifdef CONFIG_RFS_ACCEL
- kfree(state->rps_flow_id);
-#endif
- kfree(state);
-}
-
-/* Update scatter enable flags for filters pointing to our own RX queues */
-void efx_filter_update_rx_scatter(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap) ||
- table->spec[filter_idx].dmaq_id >=
- efx->n_rx_channels)
- continue;
-
- if (efx->rx_scatter)
- table->spec[filter_idx].flags |=
- EFX_FILTER_FLAG_RX_SCATTER;
- else
- table->spec[filter_idx].flags &=
- ~EFX_FILTER_FLAG_RX_SCATTER;
-
- if (table_id == EFX_FILTER_TABLE_RX_DEF)
- /* Pushed by efx_filter_push_rx_config() */
- continue;
-
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-#ifdef CONFIG_RFS_ACCEL
-
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_spec spec;
- const struct iphdr *ip;
- const __be16 *ports;
- int nhoff;
- int rc;
-
- nhoff = skb_network_offset(skb);
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(struct vlan_hdr));
-		if (((const struct vlan_hdr *)(skb->data + nhoff))->
- h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -EPROTONOSUPPORT;
-
- /* This is IP over 802.1q VLAN. We can't filter on the
- * IP 5-tuple and the vlan together, so just strip the
- * vlan header and filter on the IP part.
- */
- nhoff += sizeof(struct vlan_hdr);
- } else if (skb->protocol != htons(ETH_P_IP)) {
- return -EPROTONOSUPPORT;
- }
-
- /* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
-
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
- efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
- rxq_index);
- rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
- ip->daddr, ports[1], ip->saddr, ports[0]);
- if (rc)
- return rc;
-
- rc = efx_filter_insert_filter(efx, &spec, true);
- if (rc < 0)
- return rc;
-
- /* Remember this so we can check whether to expire the filter later */
- state->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
- ++channel->rfs_filters_added;
-
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
- &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
- rxq_index, flow_id, rc);
-
- return rc;
-}
-
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
- unsigned mask = table->size - 1;
- unsigned index;
- unsigned stop;
-
- if (!spin_trylock_bh(&state->lock))
- return false;
-
- index = state->rps_expire_index;
- stop = (index + quota) & mask;
-
- while (index != stop) {
- if (test_bit(index, table->used_bitmap) &&
- table->spec[index].priority == EFX_FILTER_PRI_HINT &&
- rps_may_expire_flow(efx->net_dev,
- table->spec[index].dmaq_id,
- state->rps_flow_id[index], index)) {
- netif_info(efx, rx_status, efx->net_dev,
- "expiring filter %d [flow %u]\n",
- index, state->rps_flow_id[index]);
- efx_filter_table_clear_entry(efx, table, index);
- }
- index = (index + 1) & mask;
- }
-
- state->rps_expire_index = stop;
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
- return true;
-}
-
-#endif /* CONFIG_RFS_ACCEL */
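
__efx_filter_rfs_expire() above caps each call at quota table entries and persists the resume point in rps_expire_index, so repeated calls sweep the whole table without holding the lock for long. A minimal sketch of that quota-limited circular scan, with the used-bitmap/priority/RPS checks replaced by hypothetical may_expire() and clear() callbacks:

	/* Scan at most `quota` entries of a power-of-two-sized ring,
	 * starting where the previous call left off.
	 */
	static unsigned int expire_scan(unsigned int start, unsigned int quota,
					unsigned int size,
					int (*may_expire)(unsigned int index),
					void (*clear)(unsigned int index))
	{
		unsigned int mask = size - 1;
		unsigned int index = start;
		unsigned int stop = (start + quota) & mask;

		while (index != stop) {
			if (may_expire(index))
				clear(index);
			index = (index + 1) & mask;
		}
		return stop;	/* caller stores this as the next resume point */
	}
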
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 5cb54723b824..63c77a557178 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,32 +11,49 @@
#define EFX_FILTER_H
#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
/**
- * enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
- * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
- * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
- * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
- * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
- * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
- * @EFX_FILTER_UNSPEC: Match type is unspecified
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ * Used for RX default unicast and multicast/broadcast filters.
*
- * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ * local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ * or local 2-tuple, or local MAC with or without outer VID, and RX
+ * default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ * with or without outer and inner VID
*/
-enum efx_filter_type {
- EFX_FILTER_TCP_FULL = 0,
- EFX_FILTER_TCP_WILD,
- EFX_FILTER_UDP_FULL,
- EFX_FILTER_UDP_WILD,
- EFX_FILTER_MAC_FULL = 4,
- EFX_FILTER_MAC_WILD,
- EFX_FILTER_UC_DEF = 8,
- EFX_FILTER_MC_DEF,
- EFX_FILTER_TYPE_COUNT, /* number of specific types */
- EFX_FILTER_UNSPEC = 0xf,
+enum efx_filter_match_flags {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001,
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+ EFX_FILTER_MATCH_REM_MAC = 0x0004,
+ EFX_FILTER_MATCH_REM_PORT = 0x0008,
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+ EFX_FILTER_MATCH_INNER_VID = 0x0080,
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
};
/**
@@ -61,37 +78,75 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
+ * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
+ * network stack. The filter must have a priority of
+ * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
+ * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
+ * request with priority %EFX_FILTER_PRI_MANUAL will reset the
+ * steering (but not remove the filter).
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_STACK = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
/**
* struct efx_filter_spec - specification for a hardware filter
- * @type: Type of match to be performed, from &enum efx_filter_type
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
* @priority: Priority of the filter, from &enum efx_filter_priority
* @flags: Miscellaneous flags, from &enum efx_filter_flags
- * @dmaq_id: Source/target queue index
- * @data: Match data (type-dependent)
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ * an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ * %EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ * is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
*
- * Use the efx_filter_set_*() functions to initialise the @type and
- * @data fields.
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
- * depends on the filter type.
+ * depends on which fields are matched.
*/
struct efx_filter_spec {
- u8 type:4;
- u8 priority:4;
- u8 flags;
- u16 dmaq_id;
- u32 data[3];
+ u32 match_flags:12;
+ u32 priority:2;
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ __be16 ether_type;
+ u8 ip_proto;
+ __be32 loc_host[4];
+ __be32 rem_host[4];
+ __be16 loc_port;
+ __be16 rem_port;
+ /* total 64 bytes */
+};
+
+enum {
+ EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
+ EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
enum efx_filter_flags flags,
unsigned rxq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
spec->dmaq_id = rxq_id;
}
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
unsigned txq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = EFX_FILTER_PRI_REQUIRED;
spec->flags = EFX_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
-extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port);
-extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port);
-extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport);
-extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport);
-extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr);
-extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr);
-extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
-extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = host;
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 lhost, __be16 lport,
+ __be32 rhost, __be16 rport)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = lhost;
+ spec->loc_port = lport;
+ spec->rem_host[0] = rhost;
+ spec->rem_port = rport;
+ return 0;
+}
+
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+ return -EINVAL;
+
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->outer_vid = htons(vid);
+ }
+ if (addr != NULL) {
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->loc_mac, addr, ETH_ALEN);
+ }
+ return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ spec->loc_mac[0] = 1;
+ return 0;
+}
+
#endif /* EFX_FILTER_H */
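
Taken together, the rewritten header turns filter construction into flag accumulation on a zeroed spec. A hedged usage fragment (not part of this patch; the address and port are illustrative), steering TCP/IPv4 traffic for 192.0.2.1:80 to RX queue 0:

	struct efx_filter_spec spec;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
				  htonl(0xc0000201),	/* 192.0.2.1 */
				  htons(80));
	/* spec is now ready to pass to the driver's filter-insert path */
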
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96759aee1c6c..96ce507d8602 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -20,7 +20,7 @@
*
**************************************************************************
*
- * Notes on locking strategy:
+ * Notes on locking strategy for the Falcon architecture:
*
* Many CSRs are very wide and cannot be read or written atomically.
* Writes from the host are buffered by the Bus Interface Unit (BIU)
@@ -54,6 +54,12 @@
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
*/
#if BITS_PER_LONG == 64
@@ -83,7 +89,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
}
/* Write a normal 128-bit CSR, locking as appropriate. */
-static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
@@ -108,7 +114,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
- efx_qword_t *value, unsigned int index)
+ const efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
@@ -129,7 +135,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
-static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
@@ -190,8 +196,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
}
/* Write a 128-bit CSR forming part of a table */
-static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
- unsigned int reg, unsigned int index)
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
@@ -203,12 +210,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Page-mapped register block size */
-#define EFX_PAGE_BLOCK_SIZE 0x2000
+/* Page size used as step between per-VI registers */
+#define EFX_VI_PAGE_SIZE 0x2000
-/* Calculate offset to page-mapped register block */
+/* Calculate offset to page-mapped register */
#define EFX_PAGED_REG(page, reg) \
- ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+ ((page) * EFX_VI_PAGE_SIZE + (reg))
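
Since EFX_VI_PAGE_SIZE is 0x2000, EFX_PAGED_REG(page, reg) is simply page * 0x2000 + reg; for example, VI page 3 of a register at offset 0x400 maps to 0x6400. A compile-time spot check of the arithmetic, purely illustrative:

	/* Illustrative only: verify the per-VI stride arithmetic */
	BUILD_BUG_ON(EFX_PAGED_REG(3, 0x400) != 0x6400);
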
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
@@ -236,19 +243,24 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
-/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
- * RX_DESC_UPD or TX_DESC_UPD)
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
*/
-static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int page)
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg, unsigned int page)
{
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
- BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
- && (reg) != 0xa1c), \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ (reg) != 0x420 && \
+ (reg) != 0x830 && \
+ (reg) != 0x83c && \
+ (reg) != 0xa18 && \
+ (reg) != 0xa1c), \
page)
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
@@ -256,7 +268,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
* collector register.
*/
static inline void _efx_writed_page_locked(struct efx_nic *efx,
- efx_dword_t *value,
+ const efx_dword_t *value,
unsigned int reg,
unsigned int page)
{
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 97dd8f18c001..128d7cdf9eb2 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -8,10 +8,11 @@
*/
#include <linux/delay.h>
+#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"
@@ -24,112 +25,235 @@
#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_PDU(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
-#define MCDI_DOORBELL(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
-#define MCDI_STATUS(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
-
/* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 10ms for the status word to be set. */
+ * via these mechanisms then wait 20ms for the status word to be set.
+ */
#define MCDI_STATUS_DELAY_US 100
-#define MCDI_STATUS_DELAY_COUNT 100
+#define MCDI_STATUS_DELAY_COUNT 200
#define MCDI_STATUS_SLEEP_MS \
(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
#define SEQ_MASK \
EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+struct efx_mcdi_async_param {
+ struct list_head list;
+ unsigned int cmd;
+ size_t inlen;
+ size_t outlen;
+ efx_mcdi_async_completer *complete;
+ unsigned long cookie;
+ /* followed by request/response buffer */
+};
+
+static void efx_mcdi_timeout_async(unsigned long context);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
+
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->mcdi;
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
}
-void efx_mcdi_init(struct efx_nic *efx)
+int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
+ bool already_attached;
+ int rc;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return;
+ efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+ if (!efx->mcdi)
+ return -ENOMEM;
mcdi = efx_mcdi(efx);
+ mcdi->efx = efx;
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ mcdi->state = MCDI_STATE_QUIESCENT;
mcdi->mode = MCDI_MODE_POLL;
+ spin_lock_init(&mcdi->async_lock);
+ INIT_LIST_HEAD(&mcdi->async_list);
+ setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
+ (unsigned long)mcdi);
(void) efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Let the MC (and BMC, if this is a LOM) know that the driver
+ * is loaded. We should do this before we reset the NIC.
+ */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ return rc;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ return 0;
}
-static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen)
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
+
+ BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+ /* Relinquish the device (back to the BMC, if this is a LOM) */
+ efx_mcdi_drv_attach(efx, false, NULL);
+
+ kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
- unsigned int i;
- efx_dword_t hdr;
+ efx_dword_t hdr[2];
+ size_t hdr_len;
u32 xflags, seqno;
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
+ BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ spin_unlock_bh(&mcdi->iface_lock);
seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
- EFX_POPULATE_DWORD_6(hdr,
- MCDI_HEADER_RESPONSE, 0,
- MCDI_HEADER_RESYNC, 1,
- MCDI_HEADER_CODE, cmd,
- MCDI_HEADER_DATALEN, inlen,
- MCDI_HEADER_SEQ, seqno,
- MCDI_HEADER_XFLAGS, xflags);
-
- efx_writed(efx, &hdr, pdu);
+ if (efx->type->mcdi_max_ver == 1) {
+ /* MCDI v1 */
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ hdr_len = 4;
+ } else {
+ /* MCDI v2 */
+ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+ hdr_len = 8;
+ }
- for (i = 0; i < inlen; i += 4)
- _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+ efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
- /* Ensure the payload is written out before the header */
- wmb();
+ mcdi->new_epoch = false;
+}
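
For MCDI v2 the real command and length no longer fit the v1 header fields, so dword 0 carries a v1-style header with CODE set to the escape value MC_CMD_V2_EXTN and DATALEN zero, while dword 1 carries the extended command and actual payload length; the payload then follows as in v1. Field positions come from the MCDI_HEADER_* and MC_CMD_V2_EXTN_IN_* definitions in mcdi_pcol.h, not from anything hard-coded in the driver.
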
- /* ring the doorbell with a distinctive value */
- _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ return 0;
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ return -name;
+ TRANSLATE_ERROR(EPERM);
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EAGAIN);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+ TRANSLATE_ERROR(EALREADY);
+ TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EPROTO;
+ }
}
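
The TRANSLATE_ERROR X-macro above expands each name into a "case MC_CMD_ERR_<name>: return -<name>;" arm, keeping the firmware-to-errno table to one line per code. A standalone analogue under an assumed numbering scheme (DEV_ERR_BASE is hypothetical, not an MCDI constant):

	#include <errno.h>

	#define DEV_ERR_BASE 1000	/* hypothetical device error numbering */
	#define TRANSLATE_ERROR(name) \
		case DEV_ERR_BASE + name: \
			return -name;

	static int dev_errno(unsigned int dev_err)
	{
		switch (dev_err) {
		case 0:
			return 0;
		TRANSLATE_ERROR(EPERM)		/* 1001 -> -EPERM */
		TRANSLATE_ERROR(ENOENT)		/* 1002 -> -ENOENT */
		TRANSLATE_ERROR(EINVAL)		/* 1022 -> -EINVAL */
	#undef TRANSLATE_ERROR
		default:
			return -EPROTO;		/* anything unrecognised */
		}
	}
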
-static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- int i;
+ unsigned int respseq, respcmd, error;
+ efx_dword_t hdr;
+
+ efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+ respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
+ if (respcmd != MC_CMD_V2_EXTN) {
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+ } else {
+ efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+ mcdi->resp_hdr_len = 8;
+ mcdi->resp_data_len =
+ EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
- for (i = 0; i < outlen; i += 4)
- *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+ if (error && mcdi->resp_data_len == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ mcdi->resprc = -EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ mcdi->resprc = -EIO;
+ } else if (error) {
+ efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+ mcdi->resprc =
+ efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+ } else {
+ mcdi->resprc = 0;
+ }
}
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned long time, finish;
- unsigned int respseq, respcmd, error;
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned int rc, spins;
- efx_dword_t reg;
+ unsigned int spins;
+ int rc;
/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
- rc = -efx_mcdi_poll_reboot(efx);
- if (rc)
- goto out;
+ rc = efx_mcdi_poll_reboot(efx);
+ if (rc) {
+ spin_lock_bh(&mcdi->iface_lock);
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ spin_unlock_bh(&mcdi->iface_lock);
+ return 0;
+ }
 	/* Poll for completion. Poll quickly (once a microsecond) for the 1st jiffy,
* because generally mcdi responses are fast. After that, back off
@@ -149,59 +273,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies;
rmb();
- efx_readd(efx, &reg, pdu);
-
- /* All 1's indicates that shared memory is in reset (and is
- * not a valid header). Wait for it to come out reset before
- * completing the command */
- if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
- EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ if (efx->type->mcdi_poll_response(efx))
break;
if (time_after(time, finish))
return -ETIMEDOUT;
}
- mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
- respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
- respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
- error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
-
- if (error && mcdi->resplen == 0) {
- netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
- rc = EIO;
- } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
- netif_err(efx, hw, efx->net_dev,
- "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
- respseq, mcdi->seqno);
- rc = EIO;
- } else if (error) {
- efx_readd(efx, &reg, pdu + 4);
- switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
-#define TRANSLATE_ERROR(name) \
- case MC_CMD_ERR_ ## name: \
- rc = name; \
- break
- TRANSLATE_ERROR(ENOENT);
- TRANSLATE_ERROR(EINTR);
- TRANSLATE_ERROR(EACCES);
- TRANSLATE_ERROR(EBUSY);
- TRANSLATE_ERROR(EINVAL);
- TRANSLATE_ERROR(EDEADLK);
- TRANSLATE_ERROR(ENOSYS);
- TRANSLATE_ERROR(ETIME);
-#undef TRANSLATE_ERROR
- default:
- rc = EIO;
- break;
- }
- } else
- rc = 0;
-
-out:
- mcdi->resprc = rc;
- if (rc)
- mcdi->resplen = 0;
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
/* Return rc=0 like wait_event_timeout() */
return 0;
@@ -212,52 +293,36 @@ out:
*/
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
- efx_dword_t reg;
- uint32_t value;
-
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return false;
-
- efx_readd(efx, &reg, addr);
- value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
-
- if (value == 0)
+ if (!efx->mcdi)
return 0;
- /* MAC statistics have been cleared on the NIC; clear our copy
- * so that efx_update_diff_stat() can continue to work.
- */
- memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
-
- EFX_ZERO_DWORD(reg);
- efx_writed(efx, &reg, addr);
+ return efx->type->mcdi_poll_reboot(efx);
+}
- if (value == MC_STATUS_DWORD_ASSERT)
- return -EINTR;
- else
- return -EIO;
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+ return cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+ MCDI_STATE_QUIESCENT;
}
-static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
/* Wait until the interface becomes QUIESCENT and we win the race
- * to mark it RUNNING. */
+ * to mark it RUNNING_SYNC.
+ */
wait_event(mcdi->wq,
- atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING)
- == MCDI_STATE_QUIESCENT);
+ cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+ MCDI_STATE_QUIESCENT);
}
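
Replacing the atomic_t with cmpxchg() on the plain state field keeps acquisition lock-free: a caller owns the interface only if it is the one that observed QUIESCENT and flipped it. A minimal sketch of the pattern, assuming kernel cmpxchg() semantics from <asm/cmpxchg.h>:

	/* Claim a multi-state resource only if it is currently QUIESCENT.
	 * cmpxchg() returns the value it observed, so equality with
	 * QUIESCENT means this caller performed the transition.
	 */
	enum state { QUIESCENT, RUNNING_SYNC, RUNNING_ASYNC, COMPLETED };

	static bool try_acquire(enum state *state, enum state running)
	{
		return cmpxchg(state, QUIESCENT, running) == QUIESCENT;
	}
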
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- if (wait_event_timeout(
- mcdi->wq,
- atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
- MCDI_RPC_TIMEOUT) == 0)
+ if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+ MCDI_RPC_TIMEOUT) == 0)
return -ETIMEDOUT;
/* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -274,17 +339,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
return 0;
}
-static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester. Return whether this was done. Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
- /* If the interface is RUNNING, then move to COMPLETED and wake any
- * waiters. If the interface isn't in RUNNING then we've received a
- * duplicate completion after we've already transitioned back to
- * QUIESCENT. [A subsequent invocation would increment seqno, so would
- * have failed the seqno check].
- */
- if (atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_RUNNING,
- MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+ MCDI_STATE_RUNNING_SYNC) {
wake_up(&mcdi->wq);
return true;
}
@@ -294,12 +356,93 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ struct efx_mcdi_async_param *async;
+ struct efx_nic *efx = mcdi->efx;
+
+ /* Process the asynchronous request queue */
+ spin_lock_bh(&mcdi->async_lock);
+ async = list_first_entry_or_null(
+ &mcdi->async_list, struct efx_mcdi_async_param, list);
+ if (async) {
+ mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+ efx_mcdi_send_request(efx, async->cmd,
+ (const efx_dword_t *)(async + 1),
+ async->inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ spin_unlock_bh(&mcdi->async_lock);
+
+ if (async)
+ return;
+ }
+
+ mcdi->state = MCDI_STATE_QUIESCENT;
wake_up(&mcdi->wq);
}
+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done. Must be called in bh-disabled
+ * context. Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+ struct efx_nic *efx = mcdi->efx;
+ struct efx_mcdi_async_param *async;
+ size_t hdr_len, data_len;
+ efx_dword_t *outbuf;
+ int rc;
+
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+ MCDI_STATE_RUNNING_ASYNC)
+ return false;
+
+ spin_lock(&mcdi->iface_lock);
+ if (timeout) {
+ /* Ensure that if the completion event arrives later,
+ * the seqno check in efx_mcdi_ev_cpl() will fail
+ */
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ rc = -ETIMEDOUT;
+ hdr_len = 0;
+ data_len = 0;
+ } else {
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ }
+ spin_unlock(&mcdi->iface_lock);
+
+ /* Stop the timer. In case the timer function is running, we
+ * must wait for it to return so that there is no possibility
+ * of it aborting the next request.
+ */
+ if (!timeout)
+ del_timer_sync(&mcdi->async_timer);
+
+ spin_lock(&mcdi->async_lock);
+ async = list_first_entry(&mcdi->async_list,
+ struct efx_mcdi_async_param, list);
+ list_del(&async->list);
+ spin_unlock(&mcdi->async_lock);
+
+ outbuf = (efx_dword_t *)(async + 1);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(async->outlen, data_len));
+ async->complete(efx, async->cookie, rc, outbuf, data_len);
+ kfree(async);
+
+ efx_mcdi_release(mcdi);
+
+ return true;
+}
+
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
- unsigned int datalen, unsigned int errno)
+ unsigned int datalen, unsigned int mcdi_err)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool wake = false;
@@ -315,52 +458,161 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
"MC response mismatch tx seq 0x%x rx "
"seq 0x%x\n", seqno, mcdi->seqno);
} else {
- mcdi->resprc = errno;
- mcdi->resplen = datalen;
+ if (efx->type->mcdi_max_ver >= 2) {
+ /* MCDI v2 responses don't fit in an event */
+ efx_mcdi_read_response_header(efx);
+ } else {
+ mcdi->resprc = efx_mcdi_errno(mcdi_err);
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = datalen;
+ }
wake = true;
}
spin_unlock(&mcdi->iface_lock);
- if (wake)
- efx_mcdi_complete(mcdi);
+ if (wake) {
+ if (!efx_mcdi_complete_async(mcdi, false))
+ (void) efx_mcdi_complete_sync(mcdi);
+
+ /* If the interface isn't RUNNING_ASYNC or
+ * RUNNING_SYNC then we've received a duplicate
+ * completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment
+ * seqno, so would have failed the seqno check].
+ */
+ }
+}
+
+static void efx_mcdi_timeout_async(unsigned long context)
+{
+ struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+
+ efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+ if (efx->type->mcdi_max_ver < 0 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+ return -EINVAL;
+
+ if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+ return -EMSGSIZE;
+
+ return 0;
}
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc)
+ return rc;
return efx_mcdi_rpc_finish(efx, cmd, inlen,
outbuf, outlen, outlen_actual);
}
-void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen)
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ return 0;
+}
- /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- spin_unlock_bh(&mcdi->iface_lock);
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ struct efx_mcdi_async_param *async;
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+ GFP_ATOMIC);
+ if (!async)
+ return -ENOMEM;
+
+ async->cmd = cmd;
+ async->inlen = inlen;
+ async->outlen = outlen;
+ async->complete = complete;
+ async->cookie = cookie;
+ memcpy(async + 1, inbuf, inlen);
+
+ spin_lock_bh(&mcdi->async_lock);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ list_add_tail(&async->list, &mcdi->async_list);
+
+ /* If this is at the front of the queue, try to start it
+ * immediately
+ */
+ if (mcdi->async_list.next == &async->list &&
+ efx_mcdi_acquire_async(mcdi)) {
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ } else {
+ kfree(async);
+ rc = -ENETDOWN;
+ }
+
+ spin_unlock_bh(&mcdi->async_lock);
- efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+ return rc;
}
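+
+/* Usage sketch (illustrative only, not part of this patch): from
+ * atomic context a caller might issue an asynchronous request roughly
+ * as follows, where my_link_done is a hypothetical completer matching
+ * efx_mcdi_async_completer:
+ *
+ *	static void my_link_done(struct efx_nic *efx, unsigned long cookie,
+ *				 int rc, efx_dword_t *outbuf,
+ *				 size_t outlen_actual)
+ *	{
+ *		if (!rc)
+ *			netif_dbg(efx, hw, efx->net_dev,
+ *				  "async MCDI done, cookie=%lu\n", cookie);
+ *	}
+ *
+ *	rc = efx_mcdi_rpc_async(efx, MC_CMD_GET_LINK, NULL, 0,
+ *				MC_CMD_GET_LINK_OUT_LEN, my_link_done, 0);
+ */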
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen, size_t *outlen_actual)
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-
if (mcdi->mode == MCDI_MODE_POLL)
rc = efx_mcdi_poll(efx);
else
@@ -380,22 +632,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
"MC command 0x%x inlen %d mode %d timed out\n",
cmd, (int)inlen, mcdi->mode);
} else {
- size_t resplen;
+ size_t hdr_len, data_len;
/* At the very least we need a memory barrier here to ensure
* we pick up changes from efx_mcdi_ev_cpl(). Protect against
* a spurious efx_mcdi_ev_cpl() running concurrently by
* acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock);
- rc = -mcdi->resprc;
- resplen = mcdi->resplen;
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
spin_unlock_bh(&mcdi->iface_lock);
+ BUG_ON(rc > 0);
+
if (rc == 0) {
- efx_mcdi_copyout(efx, outbuf,
- min(outlen, mcdi->resplen + 3) & ~0x3);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
if (outlen_actual != NULL)
- *outlen_actual = resplen;
+ *outlen_actual = data_len;
} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
; /* Don't reset if MC_CMD_REBOOT returns EIO */
else if (rc == -EIO || rc == -EINTR) {
@@ -410,6 +665,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
if (rc == -EIO || rc == -EINTR) {
msleep(MCDI_STATUS_SLEEP_MS);
efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
}
}
@@ -417,11 +673,15 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
return rc;
}
+/* Switch to polled MCDI completions. This can be called in various
+ * error conditions with various locks held, so it must be lockless.
+ * Caller is responsible for flushing asynchronous requests later.
+ */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -434,18 +694,57 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
* efx_mcdi_await_completion() will then call efx_mcdi_poll().
*
* We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
- * which efx_mcdi_complete() provides for us.
+ * which efx_mcdi_complete_sync() provides for us.
*/
mcdi->mode = MCDI_MODE_POLL;
- efx_mcdi_complete(mcdi);
+ efx_mcdi_complete_sync(mcdi);
+}
+
+/* Flush any running or queued asynchronous requests, after event processing
+ * is stopped
+ */
+void efx_mcdi_flush_async(struct efx_nic *efx)
+{
+ struct efx_mcdi_async_param *async, *next;
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ /* We must be in polling mode so no more requests can be queued */
+ BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+
+ del_timer_sync(&mcdi->async_timer);
+
+ /* If a request is still running, make sure we give the MC
+ * time to complete it so that the response won't overwrite our
+ * next request.
+ */
+ if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+ efx_mcdi_poll(efx);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ }
+
+ /* Nothing else will access the async list now, so it is safe
+ * to walk it without holding async_lock. If we hold it while
+ * calling a completer then lockdep may warn that we have
+ * acquired locks in the wrong order.
+ */
+ list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ list_del(&async->list);
+ kfree(async);
+ }
}
void efx_mcdi_mode_event(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -460,7 +759,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
* efx_mcdi_acquire() provides.
*/
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
mcdi->mode = MCDI_MODE_EVENTS;
efx_mcdi_release(mcdi);
}
@@ -477,19 +776,25 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
* are sent to the same queue, we can't be racing with
* efx_mcdi_ev_cpl()]
*
- * There's a race here with efx_mcdi_rpc(), because we might receive
- * a REBOOT event *before* the request has been copied out. In polled
- * mode (during startup) this is irrelevant, because efx_mcdi_complete()
- * is ignored. In event mode, this condition is just an edge-case of
- * receiving a REBOOT event after posting the MCDI request. Did the mc
- * reboot before or after the copyout? The best we can do always is
- * just return failure.
+ * If there is an outstanding asynchronous request, we can't
+ * complete it now (efx_mcdi_complete() would deadlock). The
+ * reset process will take care of this.
+ *
+ * There's a race here with efx_mcdi_send_request(), because
+ * we might receive a REBOOT event *before* the request has
+ * been copied out. In polled mode (during startup) this is
+ * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+ * event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI
+	 * request. Did the MC reboot before or after the copyout? The
+ * best we can do always is just return failure.
*/
spin_lock(&mcdi->iface_lock);
- if (efx_mcdi_complete(mcdi)) {
+ if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
- mcdi->resplen = 0;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
++mcdi->credits;
}
} else {
@@ -504,41 +809,12 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
break;
udelay(MCDI_STATUS_DELAY_US);
}
+ mcdi->new_epoch = true;
}
spin_unlock(&mcdi->iface_lock);
}
-static unsigned int efx_mcdi_event_link_speed[] = {
- [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
- [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
- [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
-};
-
-
-static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
-{
- u32 flags, fcntl, speed, lpa;
-
- speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
- speed = efx_mcdi_event_link_speed[speed];
-
- flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
- fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
- lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
-
- /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
- * which is only run after flushing the event queues. Therefore, it
- * is safe to modify the link state outside of the mac_lock here.
- */
- efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
-
- efx_mcdi_phy_check_fcntl(efx, lpa);
-
- efx_link_status_changed(efx);
-}
-
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -551,7 +827,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_BADSSERT:
netif_err(efx, hw, efx->net_dev,
"MC watchdog or assertion failure at 0x%x\n", data);
- efx_mcdi_ev_death(efx, EINTR);
+ efx_mcdi_ev_death(efx, -EINTR);
break;
case MCDI_EVENT_CODE_PMNOTICE:
@@ -576,8 +852,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
"MC Scheduler error address=0x%x\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
+ case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
- efx_mcdi_ev_death(efx, EIO);
+ efx_mcdi_ev_death(efx, -EIO);
break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
@@ -590,7 +867,27 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
-
+ case MCDI_EVENT_CODE_TX_FLUSH:
+ case MCDI_EVENT_CODE_RX_FLUSH:
+ /* Two flush events will be sent: one to the same event
+ * queue as completions, and one to event queue 0.
+ * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
+ * flag will be set, and we should ignore the event
+ * because we want to wait for all completions.
+ */
+ BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
+ MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
+ if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
+ efx_ef10_handle_drain_event(efx);
+ break;
+ case MCDI_EVENT_CODE_TX_ERR:
+ case MCDI_EVENT_CODE_RX_ERR:
+ netif_err(efx, hw, efx->net_dev,
+ "%s DMA error (event: "EFX_QWORD_FMT")\n",
+ code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+ EFX_QWORD_VAL(*event));
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
code);
@@ -606,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
+ MCDI_DECLARE_BUF(outbuf,
+ max(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_CAPABILITIES_OUT_LEN));
size_t outlength;
const __le16 *ver_words;
+ size_t offset;
int rc;
BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
-
rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc)
goto fail;
-
if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
rc = -EIO;
goto fail;
}
ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
- snprintf(buf, len, "%u.%u.%u.%u",
- le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
- le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ offset = snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+
+ /* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
+ offset += snprintf(
+ buf + offset, len - offset, " rx? tx?");
+ else
+ offset += snprintf(
+ buf + offset, len - offset, " rx%x tx%x",
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
+ }
+
return;
fail:
@@ -634,17 +959,18 @@ fail:
buf[0] = 0;
}
-int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached)
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
{
- u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
- u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
driver_operating ? 1 : 0);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -667,8 +993,8 @@ fail:
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
- size_t outlen, offset, i;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+ size_t outlen, i;
int port_num = efx_port_num(efx);
int rc;
@@ -684,22 +1010,21 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
}
- offset = (port_num)
- ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
- : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
if (mac_address)
- memcpy(mac_address, outbuf + offset, ETH_ALEN);
+ memcpy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
+ ETH_ALEN);
if (fw_subtype_list) {
- /* Byte-swap and truncate or zero-pad as necessary */
- offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
for (i = 0;
- i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
- i++) {
- fw_subtype_list[i] =
- (offset + 2 <= outlen) ?
- le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
- offset += 2;
- }
+ i < MCDI_VAR_ARRAY_LEN(outlen,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ i++)
+ fw_subtype_list[i] = MCDI_ARRAY_WORD(
+ outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
+ for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
+ fw_subtype_list[i] = 0;
}
if (capabilities) {
if (port_num)
@@ -721,7 +1046,7 @@ fail:
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
- u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
u32 dest = 0;
int rc;
@@ -749,7 +1074,7 @@ fail:
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
- u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
size_t outlen;
int rc;
@@ -777,8 +1102,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out)
{
- u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
size_t outlen;
int rc;
@@ -804,127 +1129,10 @@ fail:
return rc;
}
-int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
- memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
- u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
@@ -976,9 +1184,9 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
- unsigned int flags, index, ofst;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ unsigned int flags, index;
const char *reason;
size_t outlen;
int retry;
@@ -1020,19 +1228,20 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
- ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
- for (index = 1; index < 32; index++) {
- netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(outbuf, ofst));
- ofst += sizeof(efx_dword_t);
- }
+ for (index = 0;
+ index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++)
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
+ 1 + index,
+ MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
+ index));
return 0;
}
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
@@ -1062,7 +1271,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
- u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
int rc;
BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
@@ -1080,7 +1289,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
__func__, rc);
}
-int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_port(struct efx_nic *efx)
{
int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
if (rc)
@@ -1089,9 +1298,9 @@ int efx_mcdi_reset_port(struct efx_nic *efx)
return rc;
}
-int efx_mcdi_reset_mc(struct efx_nic *efx)
+static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
@@ -1107,11 +1316,31 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
return rc;
}
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_RECOVER_OR_ALL;
+}
+
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_port(efx);
+}
+
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
const u8 *mac, int *id_out)
{
- u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
- u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
size_t outlen;
int rc;
@@ -1151,7 +1380,7 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
- u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
size_t outlen;
int rc;
@@ -1178,7 +1407,7 @@ fail:
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
- u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
@@ -1199,34 +1428,31 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
- __le32 *qid;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
int rc, count;
BUILD_BUG_ON(EFX_MAX_CHANNELS >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
- if (qid == NULL)
- return -ENOMEM;
-
count = 0;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) {
if (rx_queue->flush_pending) {
rx_queue->flush_pending = false;
atomic_dec(&efx->rxq_flush_pending);
- qid[count++] = cpu_to_le32(
- efx_rx_queue_index(rx_queue));
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ count, efx_rx_queue_index(rx_queue));
+ count++;
}
}
}
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
- count * sizeof(*qid), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
WARN_ON(rc < 0);
- kfree(qid);
-
return rc;
}
@@ -1245,3 +1471,247 @@ fail:
return rc;
}
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
+ return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+
+static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf,
+ MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk = part->common.mtd.erasesize;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+	/* The MCDI interface can in fact do multiple erase blocks at once,
+	 * but erasing may be slow, so we make multiple calls here to avoid
+ * tripping the MCDI RPC timeout. */
+ while (offset < end) {
+ rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+ chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ }
+out:
+ return rc;
+}
+
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc = 0;
+
+ if (part->updating) {
+ part->updating = false;
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ }
+
+ return rc;
+}
+
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_mcdi_mtd_partition *mcdi_part =
+ container_of(part, struct efx_mcdi_mtd_partition, common);
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+ efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_MTD */
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 3ba2e5b5a9cc..c34d0d4e10ee 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,18 +11,20 @@
#define EFX_MCDI_H
/**
- * enum efx_mcdi_state
+ * enum efx_mcdi_state - MCDI request handling state
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
- * mcdi_lock then they are able to move to MCDI_STATE_RUNNING
- * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
- * moved into this state is allowed to move out of it.
+ *	mcdi @iface_lock then they are able to move to
+ *	%MCDI_STATE_RUNNING_SYNC or %MCDI_STATE_RUNNING_ASYNC
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ * Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
- * MCDI_STATE_RUNNING.
+ *	%MCDI_STATE_RUNNING_SYNC.
*/
enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_SYNC,
+ MCDI_STATE_RUNNING_ASYNC,
MCDI_STATE_COMPLETED,
};
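+
+/* A sketch of the intended lifecycle, inferred from mcdi.c (not
+ * normative): QUIESCENT -> RUNNING_SYNC or RUNNING_ASYNC when
+ * efx_mcdi_acquire_sync()/efx_mcdi_acquire_async() succeeds,
+ * -> COMPLETED when the completion event arrives (or the async timer
+ * fires), -> QUIESCENT again once the owner has consumed the result
+ * and called efx_mcdi_release().
+ */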
@@ -32,28 +34,39 @@ enum efx_mcdi_mode {
};
/**
- * struct efx_mcdi_iface
- * @state: Interface state. Waited for by mcdi_wq.
- * @wq: Wait queue for threads waiting for state != STATE_RUNNING
- * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC.
+ * @state: Request handling state. Waited for by @wq.
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
- * Serialised by @lock
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING_SYNC
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @iface_lock: Serialises access to @seqno, @credits and response metadata
* @seqno: The next sequence number to use for mcdi requests.
- * Serialised by @lock
* @credits: Number of spurious MCDI completion events allowed before we
- * trigger a fatal error. Protected by @lock
- * @resprc: Returned MCDI completion
- * @resplen: Returned payload length
+ * trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
+ * @async_lock: Serialises access to @async_list while event processing is
+ * enabled
+ * @async_list: Queue of asynchronous requests
+ * @async_timer: Timer for asynchronous request timeout
*/
struct efx_mcdi_iface {
- atomic_t state;
+ struct efx_nic *efx;
+ enum efx_mcdi_state state;
+ enum efx_mcdi_mode mode;
wait_queue_head_t wq;
spinlock_t iface_lock;
- enum efx_mcdi_mode mode;
+ bool new_epoch;
unsigned int credits;
unsigned int seqno;
- unsigned int resprc;
- size_t resplen;
+ int resprc;
+ size_t resp_hdr_len;
+ size_t resp_data_len;
+ spinlock_t async_lock;
+ struct list_head async_list;
+ struct timer_list async_timer;
};
struct efx_mcdi_mon {
@@ -65,65 +78,204 @@ struct efx_mcdi_mon {
unsigned int n_attrs;
};
-extern void efx_mcdi_init(struct efx_nic *efx);
+struct efx_mcdi_mtd_partition {
+ struct efx_mtd_partition common;
+ bool updating;
+ u16 nvram_type;
+ u16 fw_subtype;
+};
+
+#define to_efx_mcdi_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ */
+struct efx_mcdi_data {
+ struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+ struct efx_mcdi_mon hwmon;
+#endif
+};
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->hwmon;
+}
+#endif
+
+extern int efx_mcdi_init(struct efx_nic *efx);
+extern void efx_mcdi_fini(struct efx_nic *efx);
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen, u8 *outbuf, size_t outlen,
+extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+ unsigned long cookie, int rc,
+ efx_dword_t *outbuf,
+ size_t outlen_actual);
+extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);
+extern void efx_mcdi_flush_async(struct efx_nic *efx);
extern void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event);
extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
-#define MCDI_PTR2(_buf, _ofst) \
- (((u8 *)_buf) + _ofst)
-#define MCDI_SET_DWORD2(_buf, _ofst, _value) \
- EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0, _value)
-#define MCDI_DWORD2(_buf, _ofst) \
- EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0)
-#define MCDI_QWORD2(_buf, _ofst) \
- EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_QWORD_0)
-
-#define MCDI_PTR(_buf, _ofst) \
- MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \
- MCDI_PTR2(_buf, \
- MC_CMD_ ## _field ## _OFST + \
- (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN)
-#define MCDI_SET_DWORD(_buf, _ofst, _value) \
- MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
-#define MCDI_DWORD(_buf, _ofst) \
- MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_QWORD(_buf, _ofst) \
- MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+/* We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned. Also, on Siena we must copy to the MC shared
+ * memory strictly 32 bits at a time, so add any necessary padding.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) \
+ efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_WORD(_buf, _field) \
+ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
+ _name2, _value2) \
+ EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2)
+#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3) \
+ EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3)
+#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4) \
+ EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4)
+#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5) \
+ EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5)
+#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6) \
+ EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6)
+#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6, _name7, _value7) \
+ EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6, \
+ MC_CMD_ ## _name7, _value7)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
+ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
+#define MCDI_FIELD(_ptr, _type, _field) \
+ EFX_EXTRACT_DWORD( \
+ *(efx_dword_t *) \
+ _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
+ MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
+ (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
+ MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
+
+#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
+ (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+ + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
+#define MCDI_DECLARE_STRUCT_PTR(_name) \
+ efx_dword_t *_name
+#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
+ ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_VAR_ARRAY_LEN(_len, _field) \
+ min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
+ ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
+#define MCDI_ARRAY_WORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *) \
+ _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
+#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
+ EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
+ EFX_DWORD_0, _value)
+#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
+#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
+ do { \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+ MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
+ _type ## _TYPEDEF, _field2)
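+
+/* Usage sketch (illustrative, not part of this patch): a request is
+ * typically built and issued with these macros, e.g. for
+ * MC_CMD_WORKAROUND as in efx_mcdi_set_workaround():
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+ *	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */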
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
- EFX_EXTRACT_DWORD( \
- *((efx_dword_t *) \
- (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
@@ -132,34 +284,29 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
-extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
- unsigned int type);
-extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length);
-extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer,
- size_t length);
-#define EFX_MCDI_NVRAM_LEN_MAX 128
-extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length);
-extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
- unsigned int type);
extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_reset_port(struct efx_nic *efx);
-extern int efx_mcdi_reset_mc(struct efx_nic *efx);
extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+extern int efx_mcdi_port_probe(struct efx_nic *efx);
+extern void efx_mcdi_port_remove(struct efx_nic *efx);
+extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+extern int efx_mcdi_port_get_number(struct efx_nic *efx);
+extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
extern int efx_mcdi_set_mac(struct efx_nic *efx);
-extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear);
-extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
#ifdef CONFIG_SFC_MCDI_MON
extern int efx_mcdi_mon_probe(struct efx_nic *efx);
@@ -169,4 +316,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_MTD
+extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer);
+extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer);
+extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
deleted file mode 100644
index 1003f309cba7..000000000000
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include "net_driver.h"
-#include "efx.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-int efx_mcdi_set_mac(struct efx_nic *efx)
-{
- u32 reject, fcntl;
- u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
-
- memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
- efx->net_dev->dev_addr, ETH_ALEN);
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* The MCDI command provides for controlling accept/reject
- * of broadcast packets too, but the driver doesn't currently
- * expose this. */
- reject = (efx->promiscuous) ? 0 :
- (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
-
- switch (efx->wanted_fc) {
- case EFX_FC_RX | EFX_FC_TX:
- fcntl = MC_CMD_FCNTL_BIDIR;
- break;
- case EFX_FC_RX:
- fcntl = MC_CMD_FCNTL_RESPOND;
- break;
- default:
- fcntl = MC_CMD_FCNTL_OFF;
- break;
- }
- if (efx->wanted_fc & EFX_FC_AUTO)
- fcntl = MC_CMD_FCNTL_AUTO;
- if (efx->fc_disable)
- fcntl = MC_CMD_FCNTL_OFF;
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
- NULL, 0, NULL);
-}
-
-bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
-{
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- size_t outlength;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
- return true;
- }
-
- return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
-}
-
-int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
-{
- u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
- int rc;
- efx_dword_t *cmd_ptr;
- int period = enable ? 1000 : 0;
- u32 addr_hi;
- u32 addr_lo;
-
- BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
-
- addr_lo = ((u64)dma_addr) >> 0;
- addr_hi = ((u64)dma_addr) >> 32;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
- cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- EFX_POPULATE_DWORD_7(*cmd_ptr,
- MC_CMD_MAC_STATS_IN_DMA, !!enable,
- MC_CMD_MAC_STATS_IN_CLEAR, clear,
- MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
- MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
- MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
- return rc;
-}
-
-int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
-{
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- rc = efx_mcdi_set_mac(efx);
- if (rc != 0)
- return rc;
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
- efx->multicast_hash.byte,
- sizeof(efx->multicast_hash),
- NULL, 0, NULL);
-}
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 1d552f0664d7..4cc5d95b2a5a 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,31 +21,62 @@ enum efx_hwmon_type {
EFX_HWMON_UNKNOWN,
EFX_HWMON_TEMP, /* temperature */
EFX_HWMON_COOL, /* cooling device, probably a heatsink */
- EFX_HWMON_IN /* input voltage */
+ EFX_HWMON_IN, /* voltage */
+ EFX_HWMON_CURR, /* current */
+ EFX_HWMON_POWER, /* power */
};
static const struct {
const char *label;
enum efx_hwmon_type hwmon_type;
int port;
-} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
-#define SENSOR(name, label, hwmon_type, port) \
- [MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
- SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1),
- SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0),
- SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1),
- SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1),
- SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1),
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port) \
+ [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+ SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
+ SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
+ SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(IN_1V0, "1.0V supply", IN, -1),
+ SENSOR(IN_1V2, "1.2V supply", IN, -1),
+ SENSOR(IN_1V8, "1.8V supply", IN, -1),
+ SENSOR(IN_2V5, "2.5V supply", IN, -1),
+ SENSOR(IN_3V3, "3.3V supply", IN, -1),
+ SENSOR(IN_12V0, "12.0V supply", IN, -1),
+ SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
+ SENSOR(IN_VREF, "ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
+ SENSOR(FAN_0, NULL, COOL, -1),
+ SENSOR(FAN_1, NULL, COOL, -1),
+ SENSOR(FAN_2, NULL, COOL, -1),
+ SENSOR(FAN_3, NULL, COOL, -1),
+ SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VAOE, "AOE input supply", IN, -1),
+ SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
+ SENSOR(IN_IAOE, "AOE input current", CURR, -1),
+ SENSOR(NIC_POWER, "Board power use", POWER, -1),
+ SENSOR(IN_0V9, "0.9V supply", IN, -1),
+ SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
+ SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT_EXTADC,
+ "Controller int. temp. raw (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
+ "Controller int. temp. (via ADC)", TEMP, -1),
+ SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
+ SENSOR(AIRFLOW, "Air flow raw", IN, -1),
#undef SENSOR
};
@@ -54,6 +85,7 @@ static const char *const sensor_status_names[] = {
[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+ [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
@@ -85,6 +117,7 @@ struct efx_mcdi_mon_attribute {
struct device_attribute dev_attr;
unsigned int index;
unsigned int type;
+ enum efx_hwmon_type hwmon_type;
unsigned int limit_value;
char name[12];
};
@@ -92,13 +125,12 @@ struct efx_mcdi_mon_attribute {
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
int rc;
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO,
- hwmon->dma_buf.dma_addr & 0xffffffff);
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI,
- (u64)hwmon->dma_buf.dma_addr >> 32);
+ MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+ hwmon->dma_buf.dma_addr);
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
inbuf, sizeof(inbuf), NULL, 0, NULL);
@@ -146,18 +178,32 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
efx_dword_t entry;
- unsigned int value;
+ unsigned int value, state;
int rc;
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
if (rc)
return rc;
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ if (state == MC_CMD_SENSOR_STATE_NO_READING)
+ return -EBUSY;
+
value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
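
The switch above exists because the hwmon sysfs ABI reports temperatures in millidegrees Celsius and power in microwatts, whereas the MC supplies plain degrees and watts. A minimal standalone sketch of the same scaling (the enum is a stand-in for the driver's efx_hwmon_type):

#include <stdio.h>

enum hwmon_type { HWMON_TEMP, HWMON_COOL, HWMON_IN, HWMON_CURR, HWMON_POWER };

/* Scale a raw MC reading to hwmon ABI units: temperature in
 * millidegrees Celsius, power in microwatts; the other classes are
 * already in suitable units and pass through unchanged.
 */
static unsigned int hwmon_scale(enum hwmon_type type, unsigned int raw)
{
	switch (type) {
	case HWMON_TEMP:
		return raw * 1000;	/* degrees C -> mC */
	case HWMON_POWER:
		return raw * 1000000;	/* W -> uW */
	default:
		return raw;
	}
}

int main(void)
{
	printf("%u %u\n", hwmon_scale(HWMON_TEMP, 45),
	       hwmon_scale(HWMON_POWER, 12));	/* 45000 12000000 */
	return 0;
}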
@@ -172,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
value = mon_attr->limit_value;
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
@@ -221,6 +277,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
strlcpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ else
+ attr->hwmon_type = EFX_HWMON_UNKNOWN;
attr->limit_value = limit_value;
sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
@@ -234,36 +294,43 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
int efx_mcdi_mon_probe(struct efx_nic *efx)
{
+ unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
- u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ unsigned int n_pages, n_sensors, n_attrs, page;
size_t outlen;
char name[12];
u32 mask;
- int rc, i, type;
+ int rc, i, j, type;
- BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);
+ /* Find out how many sensors are present */
+ n_sensors = 0;
+ page = 0;
+ do {
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
- rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
- return -EIO;
-
- /* Find out which sensors are present. Don't create a device
- * if there are none.
- */
- mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
- if (mask == 0)
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+ return -EIO;
+
+ mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ ++page;
+ } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ n_pages = page;
+
+ /* Don't create a device if there are none */
+ if (n_sensors == 0)
return 0;
- /* Check again for short response */
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
- return -EIO;
-
- rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
- 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
+ rc = efx_nic_alloc_buffer(
+ efx, &hwmon->dma_buf,
+ n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
+ GFP_KERNEL);
if (rc)
return rc;
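
The probe now discovers sensors one 32-bit mask page at a time: the MC_CMD_SENSOR_PAGE0_NEXT bit in a page's mask says another page follows and is not itself a sensor, so it is masked out before counting. A standalone sketch of the counting loop (the NEXT bit position is assumed here for illustration):

#include <stdint.h>
#include <stdio.h>

#define SENSOR_PAGE0_NEXT 31	/* assumed bit position, for illustration */

static unsigned int popcount32(uint32_t x)
{
	unsigned int n = 0;

	while (x) {
		x &= x - 1;
		n++;
	}
	return n;
}

/* Count sensors across chained mask pages, stopping at the first page
 * whose NEXT bit is clear; the NEXT bit itself is not a sensor.
 */
static unsigned int count_sensors(const uint32_t *pages, unsigned int max_pages)
{
	unsigned int page = 0, n_sensors = 0;
	uint32_t mask;

	do {
		mask = pages[page++];
		n_sensors += popcount32(mask & ~(1u << SENSOR_PAGE0_NEXT));
	} while ((mask & (1u << SENSOR_PAGE0_NEXT)) && page < max_pages);

	return n_sensors;
}

int main(void)
{
	uint32_t pages[] = { 0x8000000f, 0x00000007 };	/* page 0 chains on */

	printf("%u sensors\n", count_sensors(pages, 2));	/* 4 + 3 = 7 */
	return 0;
}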
@@ -274,7 +341,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
* attributes for this set of sensors: name of the driver plus
* value, min, max, crit, alarm and label for each sensor.
*/
- n_attrs = 1 + 6 * hweight32(mask);
+ n_attrs = 1 + 6 * n_sensors;
hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
if (!hwmon->attrs) {
rc = -ENOMEM;
@@ -291,26 +358,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- for (i = 0, type = -1; ; i++) {
+ for (i = 0, j = -1, type = -1; ; i++) {
+ enum efx_hwmon_type hwmon_type;
const char *hwmon_prefix;
unsigned hwmon_index;
u16 min1, max1, min2, max2;
/* Find next sensor type or exit if there is none */
- type++;
- while (!(mask & (1 << type))) {
+ do {
type++;
- if (type == 32)
- return 0;
- }
- /* Skip sensors specific to a different port */
- if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
- efx_mcdi_sensor_type[type].port >= 0 &&
- efx_mcdi_sensor_type[type].port != efx_port_num(efx))
- continue;
+ if ((type % 32) == 0) {
+ page = type / 32;
+ j = -1;
+ if (page == n_pages)
+ return 0;
+
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+ page);
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ mask = (MCDI_DWORD(outbuf,
+ SENSOR_INFO_OUT_MASK) &
+ ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
- switch (efx_mcdi_sensor_type[type].hwmon_type) {
+ /* Check again for short response */
+ if (outlen <
+ MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ } while (!(mask & (1 << type % 32)));
+ j++;
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+ /* Skip sensors specific to a different port */
+ if (hwmon_type != EFX_HWMON_UNKNOWN &&
+ efx_mcdi_sensor_type[type].port >= 0 &&
+ efx_mcdi_sensor_type[type].port !=
+ efx_port_num(efx))
+ continue;
+ } else {
+ hwmon_type = EFX_HWMON_UNKNOWN;
+ }
+
+ switch (hwmon_type) {
case EFX_HWMON_TEMP:
hwmon_prefix = "temp";
hwmon_index = ++n_temp; /* 1-based */
@@ -327,16 +431,24 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
hwmon_prefix = "in";
hwmon_index = n_in++; /* 0-based */
break;
+ case EFX_HWMON_CURR:
+ hwmon_prefix = "curr";
+ hwmon_index = ++n_curr; /* 1-based */
+ break;
+ case EFX_HWMON_POWER:
+ hwmon_prefix = "power";
+ hwmon_index = ++n_power; /* 1-based */
+ break;
}
min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN1);
+ SENSOR_INFO_ENTRY, j, MIN1);
max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX1);
+ SENSOR_INFO_ENTRY, j, MAX1);
min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN2);
+ SENSOR_INFO_ENTRY, j, MIN2);
max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX2);
+ SENSOR_INFO_ENTRY, j, MAX2);
if (min1 != max1) {
snprintf(name, sizeof(name), "%s%u_input",
@@ -346,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- snprintf(name, sizeof(name), "%s%u_min",
- hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
- efx, name, efx_mcdi_mon_show_limit,
- i, type, min1);
- if (rc)
- goto fail;
+ if (hwmon_type != EFX_HWMON_POWER) {
+ snprintf(name, sizeof(name), "%s%u_min",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, min1);
+ if (rc)
+ goto fail;
+ }
snprintf(name, sizeof(name), "%s%u_max",
hwmon_prefix, hwmon_index);
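
The names built here follow the standard hwmon sysfs scheme: a class prefix plus a channel index, with "in" channels 0-based and the other classes 1-based, then a suffix such as _input, _min, _max or _label. A sketch of the name construction (hypothetical helper, not driver code):

#include <stdio.h>

/* Build a hwmon attribute name such as "temp2_input" or "in0_min".
 * Per hwmon convention "in" channels are numbered from 0 and the
 * other classes from 1; the caller passes the right index.
 */
static void hwmon_attr_name(char *buf, size_t len, const char *prefix,
			    unsigned int index, const char *suffix)
{
	snprintf(buf, len, "%s%u_%s", prefix, index, suffix);
}

int main(void)
{
	char name[12];

	hwmon_attr_name(name, sizeof(name), "temp", 2, "input");
	puts(name);				/* temp2_input */
	hwmon_attr_name(name, sizeof(name), "in", 0, "min");
	puts(name);				/* in0_min */
	return 0;
}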
@@ -383,7 +497,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- if (efx_mcdi_sensor_type[type].label) {
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+ efx_mcdi_sensor_type[type].label) {
snprintf(name, sizeof(name), "%s%u_label",
hwmon_prefix, hwmon_index);
rc = efx_mcdi_mon_add_attr(
@@ -400,8 +515,7 @@ fail:
void efx_mcdi_mon_remove(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
- struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
unsigned int i;
for (i = 0; i < hwmon->n_attrs; i++)
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c5c9747861ba..b5cf62492f8e 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,13 @@
#define MC_FW_STATE_BOOTING (4)
/* The Scheduler has started. */
#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
/* Siena MC shared memory offsets */
/* The 'doorbell' addresses are hard-wired to alert the MC when written */
@@ -39,18 +46,21 @@
#define MC_STATUS_DWORD_REBOOT (0xb007b007)
#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
/* The current version of the MCDI protocol.
*
* Note that the ROM burnt into the card only talks V0, so at the very
* least every driver must support version 0 and MCDI_PCOL_VERSION
*/
-#define MCDI_PCOL_VERSION 1
+#define MCDI_PCOL_VERSION 2
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
/* MCDI version 1
*
- * Each MCDI request starts with an MCDI_HEADER, which is a 32byte
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
* structure, filled in by the client.
*
* 0 7 8 16 20 22 23 24 31
@@ -87,9 +97,11 @@
#define MCDI_HEADER_DATALEN_LBN 8
#define MCDI_HEADER_DATALEN_WIDTH 8
#define MCDI_HEADER_SEQ_LBN 16
-#define MCDI_HEADER_RSVD_LBN 20
-#define MCDI_HEADER_RSVD_WIDTH 2
#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
#define MCDI_HEADER_ERROR_LBN 22
#define MCDI_HEADER_ERROR_WIDTH 1
#define MCDI_HEADER_RESPONSE_LBN 23
@@ -100,7 +112,11 @@
#define MCDI_HEADER_XFLAGS_EVREQ 0x01
/* Maximum number of payload bytes */
-#define MCDI_CTL_SDU_LEN_MAX 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
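
Given these LBN/WIDTH values, a v1-style request header packs into one little-endian dword. A standalone sketch of the packing; the CODE field's position (bits 0-6) is assumed here, since its defines fall outside this hunk:

#include <stdint.h>
#include <stdio.h>

/* Field positions restated from the MCDI_HEADER definitions above */
#define HDR_CODE_LBN		0
#define HDR_CODE_WIDTH		7	/* assumed; defines not in this hunk */
#define HDR_DATALEN_LBN		8
#define HDR_DATALEN_WIDTH	8
#define HDR_SEQ_LBN		16
#define HDR_SEQ_WIDTH		4
#define HDR_XFLAGS_LBN		24
#define HDR_XFLAGS_WIDTH	8

static uint32_t put_field(unsigned int lbn, unsigned int width, uint32_t val)
{
	return (val & ((1u << width) - 1)) << lbn;
}

/* Pack a request header dword: command code, payload length in bytes,
 * sequence number and extended flags (e.g. XFLAGS_EVREQ).
 */
static uint32_t mcdi_pack_header(unsigned int code, unsigned int datalen,
				 unsigned int seq, unsigned int xflags)
{
	return put_field(HDR_CODE_LBN, HDR_CODE_WIDTH, code) |
	       put_field(HDR_DATALEN_LBN, HDR_DATALEN_WIDTH, datalen) |
	       put_field(HDR_SEQ_LBN, HDR_SEQ_WIDTH, seq) |
	       put_field(HDR_XFLAGS_LBN, HDR_XFLAGS_WIDTH, xflags);
}

int main(void)
{
	/* command 0x8, no payload, sequence 3, no flags -> 0x00030008 */
	printf("0x%08x\n", mcdi_pack_header(0x8, 0, 3, 0));
	return 0;
}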
/* The MC can generate events for two reasons:
* - To complete a shared memory request if XFLAGS_EVREQ was set
@@ -145,22 +161,69 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
/* Non-existent command target */
#define MC_CMD_ERR_ENOENT 2
/* assert() has killed the MC */
#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
/* Caller does not hold required locks */
#define MC_CMD_ERR_EACCES 13
/* Resource is currently unavailable (e.g. lock contention) */
#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
/* Invalid argument to target */
#define MC_CMD_ERR_EINVAL 22
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
/* Non-recursive resource is already acquired */
#define MC_CMD_ERR_EDEADLK 35
/* Operation not implemented */
#define MC_CMD_ERR_ENOSYS 38
/* Operation timed out */
#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
#define MC_CMD_ERR_CODE_OFST 0
@@ -178,9 +241,11 @@
/* Vectors in the boot ROM */
/* Point to the copycode entry point. */
-#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
/* Points to the recovery mode entry point. */
-#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -209,16 +274,29 @@
(n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
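
A v2 error response therefore carries the errno at MC_CMD_ERR_CODE_OFST and, when the response is long enough, the 0-based index of the offending argument at MC_CMD_ERR_ARG_OFST. A sketch of decoding such a payload, assuming little-endian dwords as elsewhere in MCDI:

#include <stdint.h>
#include <stdio.h>

#define ERR_CODE_OFST 0
#define ERR_ARG_OFST  4

static uint32_t get_dword(const uint8_t *buf, unsigned int ofst)
{
	/* MCDI payloads are little-endian */
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Decode a v2 error payload: the errno is always present, the
 * argument index only if the response is at least 8 bytes long.
 */
static void decode_mcdi_error(const uint8_t *buf, size_t len)
{
	printf("error %u", get_dword(buf, ERR_CODE_OFST));
	if (len >= ERR_ARG_OFST + 4)
		printf(" (first bad argument: %u)",
		       get_dword(buf, ERR_ARG_OFST));
	printf("\n");
}

int main(void)
{
	uint8_t resp[8] = { 22, 0, 0, 0, 1, 0, 0, 0 };	/* EINVAL, arg 1 */

	decode_mcdi_error(resp, sizeof(resp));
	return 0;
}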
/* MCDI_EVENT structuredef */
#define MCDI_EVENT_LEN 8
#define MCDI_EVENT_CONT_LBN 32
#define MCDI_EVENT_CONT_WIDTH 1
#define MCDI_EVENT_LEVEL_LBN 33
#define MCDI_EVENT_LEVEL_WIDTH 3
-#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
-#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
-#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
-#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
@@ -230,9 +308,14 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
+/* enum: 100Mb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -247,26 +330,80 @@
#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
#define MCDI_EVENT_FWALERT_REASON_LBN 0
#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
-#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
#define MCDI_EVENT_FLR_VF_LBN 0
#define MCDI_EVENT_FLR_VF_WIDTH 8
#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
-#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
-#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
-#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
#define MCDI_EVENT_TX_ERR_INFO_LBN 16
#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
-#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */
-#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */
-#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */
-#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -275,21 +412,60 @@
#define MCDI_EVENT_EV_CODE_WIDTH 4
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
-#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
-#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
-#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
-#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
-#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
-#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
-#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
-#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
-#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
-#define MCDI_EVENT_CODE_FLR 0xa /* enum */
-#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
-#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
-#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
-#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
-#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -305,15 +481,114 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* Seconds field of timestamp */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* Nanoseconds field of timestamp */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* Lowest four bytes of sourceUUID from PTP packet */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+
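
An MCDI event is a single 8-byte quantity: event-specific DATA in the low dword, with CONT, LEVEL, SRC, CODE and EV_CODE at the bit positions listed above. A standalone sketch of pulling fields out of the 64-bit value:

#include <stdint.h>
#include <stdio.h>

/* Bit positions restated from the MCDI_EVENT definitions above */
#define EV_DATA_LBN	0
#define EV_DATA_WIDTH	32
#define EV_LEVEL_LBN	33
#define EV_LEVEL_WIDTH	3
#define EV_CODE_LBN	44
#define EV_CODE_WIDTH	8

/* Extract a field of up to 63 bits from an event qword */
static uint64_t get_bits(uint64_t ev, unsigned int lbn, unsigned int width)
{
	return (ev >> lbn) & ((1ULL << width) - 1);
}

int main(void)
{
	/* Synthetic event: code 0x4 (LINKCHANGE), level 0 (INFO) */
	uint64_t ev = ((uint64_t)0x4 << EV_CODE_LBN) | 0x00030002;

	printf("code=0x%llx level=%llu data=0x%08llx\n",
	       (unsigned long long)get_bits(ev, EV_CODE_LBN, EV_CODE_WIDTH),
	       (unsigned long long)get_bits(ev, EV_LEVEL_LBN, EV_LEVEL_WIDTH),
	       (unsigned long long)get_bits(ev, EV_DATA_LBN, EV_DATA_WIDTH));
	return 0;
}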
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: One or more PPS OUT events */
+#define FCDI_EVENT_CODE_PPS_OUT 0x7
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EVENT_PPS_COUNT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT structuredef */
+#define FCDI_EXTENDED_EVENT_LENMIN 16
+#define FCDI_EXTENDED_EVENT_LENMAX 248
+#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
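
An extended PPS event is thus a record count followed by up to 30 eight-byte timestamp records starting at offset 8, each holding seconds at +0 and nanoseconds at +4. A sketch of walking the records (byte-level parsing shown for clarity):

#include <stdint.h>
#include <stdio.h>

#define PPS_COUNT_OFST	0
#define PPS_TIME_OFST	8
#define PPS_TIME_LEN	8
#define PPS_TIME_MAXNUM	30

struct pps_ts { uint32_t sec; uint32_t nsec; };

static uint32_t get_dword(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Extract up to max timestamps from an extended PPS event buffer;
 * returns the number actually copied out.
 */
static unsigned int parse_pps(const uint8_t *buf, struct pps_ts *out,
			      unsigned int max)
{
	unsigned int i, n = get_dword(buf, PPS_COUNT_OFST);

	if (n > PPS_TIME_MAXNUM)
		n = PPS_TIME_MAXNUM;
	if (n > max)
		n = max;
	for (i = 0; i < n; i++) {
		unsigned int ofst = PPS_TIME_OFST + i * PPS_TIME_LEN;

		out[i].sec = get_dword(buf, ofst);
		out[i].nsec = get_dword(buf, ofst + 4);
	}
	return n;
}

int main(void)
{
	uint8_t buf[16] = { 1, 0, 0, 0, 0, 0, 0, 0,	/* count = 1 */
			    10, 0, 0, 0,		/* 10 s */
			    0x40, 0x42, 0x0f, 0 };	/* 1000000 ns */
	struct pps_ts ts[PPS_TIME_MAXNUM];
	unsigned int n = parse_pps(buf, ts, PPS_TIME_MAXNUM);

	printf("%u record(s): %u.%09u\n", n, ts[0].sec, ts[0].nsec);
	return 0;
}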
/***********************************/
@@ -365,11 +640,27 @@
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to start the datapath CPUs.
+ * This is useful for certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to parse any configuration from
+ * flash. (In addition, the datapath CPUs will not be started, as for
+ * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
+ * certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
/* MC_CMD_COPYCODE_OUT msgresponse */
#define MC_CMD_COPYCODE_OUT_LEN 0
@@ -377,11 +668,13 @@
/***********************************/
/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
*/
#define MC_CMD_SET_FUNC 0x4
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
/* MC_CMD_SET_FUNC_OUT msgresponse */
@@ -390,6 +683,7 @@
/***********************************/
/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
@@ -398,7 +692,10 @@
/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
@@ -410,25 +707,38 @@
/***********************************/
/* MC_CMD_GET_ASSERTS
- * Get and clear any assertion status.
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
*/
#define MC_CMD_GET_ASSERTS 0x6
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
-#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
@@ -441,9 +751,12 @@
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -459,11 +772,20 @@
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
-/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
@@ -471,6 +793,7 @@
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
@@ -478,46 +801,22 @@
#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
-
-/***********************************/
-/* MC_CMD_GET_FPGAREG
- * Read multiple bytes from PTP FPGA.
- */
-#define MC_CMD_GET_FPGAREG 0x9
-
-/* MC_CMD_GET_FPGAREG_IN msgrequest */
-#define MC_CMD_GET_FPGAREG_IN_LEN 8
-#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4
-
-/* MC_CMD_GET_FPGAREG_OUT msgresponse */
-#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
-#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252
-#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
-
-
-/***********************************/
-/* MC_CMD_PUT_FPGAREG
- * Write multiple bytes to PTP FPGA.
- */
-#define MC_CMD_PUT_FPGAREG 0xa
-
-/* MC_CMD_PUT_FPGAREG_IN msgrequest */
-#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
-#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
-#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
-#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
-
-/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
-#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
/***********************************/
@@ -528,32 +827,74 @@
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
#define MC_CMD_PTP_IN_OP_OFST 0
#define MC_CMD_PTP_IN_OP_LEN 1
-#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */
-#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */
-#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */
-#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */
-#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */
-#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */
-#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */
-#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */
-#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */
-#define MC_CMD_PTP_OP_MAX 0xc /* enum */
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change the frequency of the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x16
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
-#define MC_CMD_PTP_MODE_V1 0x0 /* enum */
-#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
-#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
-#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
-#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
@@ -566,7 +907,9 @@
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
@@ -586,19 +929,27 @@
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
-#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
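
MC_CMD_PTP_IN_ADJUST_BITS (0x28 = 40) gives the number of fractional bits in FREQ. Assuming the field expresses the rate offset as a signed fixed-point fraction (nanoseconds of drift per nanosecond of elapsed time), a parts-per-billion adjustment converts as freq = ppb * 2^40 / 10^9. A worked sketch of that conversion; the interpretation and the rounding scheme are a plausible reading, not spelled out in this header:

#include <stdint.h>
#include <stdio.h>

#define ADJUST_FRAC_BITS 40		/* MC_CMD_PTP_IN_ADJUST_BITS */
#define ONE_BILLION 1000000000LL

/* Convert a signed parts-per-billion rate adjustment to a 40-bit
 * fixed-point FREQ value, rounding to nearest. Safe for |ppb| up to
 * a few million; larger values would overflow the shift.
 */
static int64_t ppb_to_freq(int32_t ppb)
{
	int64_t num = (int64_t)ppb << ADJUST_FRAC_BITS;

	return (num + (num >= 0 ? ONE_BILLION / 2 : -ONE_BILLION / 2)) /
	       ONE_BILLION;
}

int main(void)
{
	/* +100 ppb -> 100 * 2^40 / 1e9 = 109951 */
	printf("%lld\n", (long long)ppb_to_freq(100));
	return 0;
}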
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
@@ -613,86 +964,240 @@
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue ID to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
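
Each TIMESET brackets one NIC clock read between two host timestamps, so the sample with the narrowest HOSTEND-HOSTSTART window bounds the host-to-NIC offset most tightly. A standalone sketch of selecting that sample; this is a typical strategy for such bracketed readings, not necessarily the driver's exact algorithm:

#include <stdint.h>
#include <stdio.h>

struct timeset {
	uint32_t hoststart;	/* host clock before NIC read (ns) */
	uint32_t seconds;	/* NIC time, seconds */
	uint32_t nanoseconds;	/* NIC time, nanoseconds */
	uint32_t hostend;	/* host clock after NIC read (ns) */
};

/* Return the index of the sample with the narrowest host window,
 * i.e. the one that bounds the NIC clock read most tightly.
 */
static unsigned int best_timeset(const struct timeset *ts, unsigned int n)
{
	unsigned int i, best = 0;
	uint32_t best_win = ts[0].hostend - ts[0].hoststart;

	for (i = 1; i < n; i++) {
		uint32_t win = ts[i].hostend - ts[i].hoststart;

		if (win < best_win) {
			best_win = win;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	struct timeset ts[2] = {
		{ 1000, 5, 500, 1900 },		/* 900 ns window */
		{ 3000, 5, 2600, 3400 },	/* 400 ns window */
	};

	printf("best sample: %u\n", best_timeset(ts, 2));	/* 1 */
	return 0;
}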
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
-#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */
-#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */
-#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */
-#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */
-#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -702,6 +1207,7 @@
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
@@ -710,6 +1216,7 @@
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
#define MC_CMD_CSR_READ32_OUT_LENMAX 252
#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
@@ -726,6 +1233,7 @@
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
@@ -739,6 +1247,48 @@
/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
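
The request thus overloads INTERVAL: with a non-NULL ADDR it is a reporting period in seconds, with a NULL ADDR it selects a sub-operation. A sketch of building the 16-byte request from the offsets above (the helpers are illustrative, not the driver's macros):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define HP_IN_LEN		16
#define HP_IN_SUBCMD_OFST	0
#define HP_IN_OCSD_ADDR_OFST	4	/* 64-bit, LO then HI */
#define HP_IN_OCSD_INTERVAL_OFST 12

static void set_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		buf[ofst + i] = (uint8_t)(val >> (8 * i));
}

/* Build an OCSD request: with a non-NULL address, start reporting at
 * the given interval; with a NULL address, INTERVAL carries a sub-op
 * (0 = stop, 1 = report current state, 2 = debug temperature).
 */
static void build_hp_ocsd(uint8_t *buf, uint64_t addr, uint32_t interval)
{
	memset(buf, 0, HP_IN_LEN);
	set_dword(buf, HP_IN_SUBCMD_OFST, 0);	/* MC_CMD_HP_IN_OCSD_SUBCMD */
	set_dword(buf, HP_IN_OCSD_ADDR_OFST, (uint32_t)addr);
	set_dword(buf, HP_IN_OCSD_ADDR_OFST + 4, (uint32_t)(addr >> 32));
	set_dword(buf, HP_IN_OCSD_INTERVAL_OFST, interval);
}

int main(void)
{
	uint8_t req[HP_IN_LEN];

	build_hp_ocsd(req, 0, 1);	/* NULL address: report OCSD state */
	printf("subcmd=%u interval=%u\n", req[0], req[12]);
	return 0;
}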
+
+
+/***********************************/
/* MC_CMD_STACKINFO
* Get stack information.
*/
@@ -751,6 +1301,7 @@
#define MC_CMD_STACKINFO_OUT_LENMIN 12
#define MC_CMD_STACKINFO_OUT_LENMAX 252
#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
@@ -765,19 +1316,35 @@
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
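
A transaction is healthy only when STATUS equals MC_CMD_MDIO_STATUS_GOOD exactly — the DONE bit set and every other bit clear. A minimal check (constants restated from above; clause 22 is selected by passing MC_CMD_MDIO_CLAUSE22 as DEVAD, otherwise clause 45 applies):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MDIO_CLAUSE22	 0x20	/* pass as DEVAD to force clause 22 */
#define MDIO_STATUS_GOOD 0x8	/* DONE set, all other bits clear */

/* A transaction succeeded only if STATUS is exactly GOOD; any other
 * bit set indicates an MDIO-level fault.
 */
static bool mdio_status_ok(uint32_t status)
{
	return status == MDIO_STATUS_GOOD;
}

int main(void)
{
	printf("%d %d\n", mdio_status_ok(0x8), mdio_status_ok(0xc)); /* 1 0 */
	return 0;
}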
/***********************************/
@@ -788,18 +1355,34 @@
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -813,6 +1396,9 @@
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
@@ -826,9 +1412,15 @@
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
@@ -836,69 +1428,111 @@
/***********************************/
/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map.
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ32 0x14
/* MC_CMD_PORT_READ32_IN msgrequest */
#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
/***********************************/
/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map.
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE32 0x15
/* MC_CMD_PORT_WRITE32_IN msgrequest */
#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
/***********************************/
/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map.
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ128 0x16
/* MC_CMD_PORT_READ128_IN msgrequest */
#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
/***********************************/
/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map.
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE128 0x17
/* MC_CMD_PORT_WRITE128_IN msgrequest */
#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
/* MC_CMD_PORT_WRITE128_OUT msgresponse */
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
/***********************************/
/* MC_CMD_GET_BOARD_CFG
@@ -916,18 +1550,10 @@
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
-/* Enum values, see field(s): */
-/* CAPABILITIES_PORT0 */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
@@ -936,6 +1562,11 @@
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
@@ -944,7 +1575,7 @@
/***********************************/
/* MC_CMD_DBI_READX
- * Read DBI register(s).
+ * Read DBI register(s) -- extended functionality
*/
#define MC_CMD_DBI_READX 0x19
@@ -952,6 +1583,7 @@
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters
+ * (offset 32); see MC_CMD_DBIRDOP_TYPEDEF.
+ */
#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
@@ -963,11 +1595,27 @@
#define MC_CMD_DBI_READX_OUT_LENMIN 4
#define MC_CMD_DBI_READX_OUT_LENMAX 252
#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
/***********************************/
/* MC_CMD_SET_RAND_SEED
@@ -977,6 +1625,7 @@
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
@@ -986,7 +1635,7 @@
/***********************************/
/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the PCIE LTSSM.
+ * Retrieve the history of the LTSSM, if the build supports it.
*/
#define MC_CMD_LTSSM_HIST 0x1b
@@ -997,6 +1646,7 @@
#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
@@ -1005,41 +1655,47 @@
/***********************************/
/* MC_CMD_DRV_ATTACH
- * Inform MCPU that this port is managed on the host.
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
*/
#define MC_CMD_DRV_ATTACH 0x1c
/* MC_CMD_DRV_ATTACH_IN msgrequest */
-#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state (0=detached, 1=attached) to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state (0=detached, 1=attached) */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
-
-/***********************************/
-/* MC_CMD_NCSI_PROD
- * Trigger an NC-SI event.
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state (0=detached, 1=attached) */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
*/
-#define MC_CMD_NCSI_PROD 0x1d
-
-/* MC_CMD_NCSI_PROD_IN msgrequest */
-#define MC_CMD_NCSI_PROD_IN_LEN 4
-#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
-#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
-#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
-#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
-#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
-
-/* MC_CMD_NCSI_PROD_OUT msgresponse */
-#define MC_CMD_NCSI_PROD_OUT_LEN 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
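
As a hedged sketch of how a driver might use these definitions, the extended attach request could be built as below (the in-tree driver's own helpers differ):

#include <stdint.h>
#include <string.h>

/* Minimal sketch: attach this function and ask for low-latency datapath
 * firmware. FIRMWARE_ID is only a preference and is ignored by older
 * platforms, so the driver must still issue MC_CMD_GET_CAPABILITIES
 * afterwards. Little-endian host assumed. */
static void drv_attach_req(uint8_t buf[MC_CMD_DRV_ATTACH_IN_LEN])
{
	uint32_t state = 1;	/* 1 = attached */
	uint32_t update = 1;	/* set the new state, don't just report */
	uint32_t fw = MC_CMD_FW_LOW_LATENCY;

	memcpy(buf + MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST, &state, 4);
	memcpy(buf + MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, &update, 4);
	memcpy(buf + MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST, &fw, 4);
}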
/***********************************/
@@ -1050,6 +1706,7 @@
/* MC_CMD_SHMUART_IN msgrequest */
#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
/* MC_CMD_SHMUART_OUT msgresponse */
@@ -1057,13 +1714,33 @@
/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_ENTITY_RESET
- * Generic per-port reset.
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -1080,7 +1757,9 @@
/* MC_CMD_PCIE_CREDITS_IN msgrequest */
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
@@ -1141,7 +1820,7 @@
/***********************************/
/* MC_CMD_PUTS
- * puts(3) implementation over MCDI
+ * Copy the given ASCII string out onto UART and/or out of the network port.
*/
#define MC_CMD_PUTS 0x23
@@ -1167,7 +1846,8 @@
/***********************************/
/* MC_CMD_GET_PHY_CFG
- * Report PHY configuration.
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None.
*/
#define MC_CMD_GET_PHY_CFG 0x24
@@ -1176,6 +1856,7 @@
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
@@ -1191,7 +1872,9 @@
#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
@@ -1213,20 +1896,36 @@
#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
#define MC_CMD_PHY_CAP_AN_LBN 10
#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
-#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
-#define MC_CMD_MEDIA_CX4 0x2 /* enum */
-#define MC_CMD_MEDIA_KX4 0x3 /* enum */
-#define MC_CMD_MEDIA_XFP 0x4 /* enum */
-#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
-#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
+/* enum: XAUI. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
-#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
@@ -1234,7 +1933,8 @@
#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
-#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
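
A small hedged sketch of consuming two of these fields, given the dwords read from a GET_PHY_CFG response:

#include <stdint.h>

/* Minimal sketch: test one SUPPORTED_CAP bit and name the media type.
 * 'cap' and 'media' are the dwords at SUPPORTED_CAP_OFST/MEDIA_TYPE_OFST. */
static int phy_supports_40g(uint32_t cap)
{
	return (cap >> MC_CMD_PHY_CAP_40000FDX_LBN) & 1;
}

static const char *media_name(uint32_t media)
{
	switch (media) {
	case MC_CMD_MEDIA_XAUI:		return "XAUI";
	case MC_CMD_MEDIA_CX4:		return "CX4";
	case MC_CMD_MEDIA_KX4:		return "KX4";
	case MC_CMD_MEDIA_XFP:		return "XFP";
	case MC_CMD_MEDIA_SFP_PLUS:	return "SFP+";
	case MC_CMD_MEDIA_BASE_T:	return "10GBaseT";
	default:			return "unknown";
	}
}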
@@ -1243,18 +1943,31 @@
/***********************************/
/* MC_CMD_START_BIST
- * Start a BIST test on the PHY.
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST.
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_START_BIST 0x25
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
-#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
-#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
-#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
-#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
-#define MC_CMD_PHY_BIST 0x5 /* enum */
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
/* MC_CMD_START_BIST_OUT msgresponse */
#define MC_CMD_START_BIST_OUT_LEN 0
@@ -1262,7 +1975,12 @@
/***********************************/
/* MC_CMD_POLL_BIST
- * Poll for BIST completion.
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
*/
#define MC_CMD_POLL_BIST 0x26
@@ -1271,15 +1989,21 @@
/* MC_CMD_POLL_BIST_OUT msgresponse */
#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
-#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
-#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
-#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
-#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
@@ -1287,42 +2011,116 @@
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
-#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
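
A hedged sketch of acting on a failed memory BIST using the fields above ('resp' being the raw little-endian response buffer):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal sketch: print the failure details from MC_CMD_POLL_BIST_OUT_MEM.
 * ADDR/EXPECT/ACTUAL are only meaningful when RESULT is POLL_BIST_FAILED. */
static void report_mem_bist(const uint8_t *resp)
{
	uint32_t result, addr, expect, actual;

	memcpy(&result, resp + 0, 4);	/* MC_CMD_POLL_BIST_OUT_RESULT_OFST */
	if (result != MC_CMD_POLL_BIST_FAILED)
		return;
	memcpy(&addr, resp + MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST, 4);
	memcpy(&expect, resp + MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST, 4);
	memcpy(&actual, resp + MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST, 4);
	printf("mem BIST failed at %#x: wrote %#x, read %#x\n",
	       (unsigned int)addr, (unsigned int)expect, (unsigned int)actual);
}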
/***********************************/
/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s).
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than by
+ * writing FLUSH_CMD directly.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
*/
#define MC_CMD_FLUSH_RX_QUEUES 0x27
@@ -1341,7 +2139,7 @@
/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
- * Get port's loopback modes.
+ * Returns a bitmask of loopback modes available at each speed.
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
@@ -1349,61 +2147,116 @@
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
-#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
-#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
-#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
-#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
-#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
-#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
-#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
-#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
-#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
-#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
-#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
-#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
-#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
-#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
-#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
-#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
-#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
-#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
-#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
-#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
-#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
-#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside Near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
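
Each per-speed field is a 64-bit mask split into LO/HI dwords, with one bit per MC_CMD_LOOPBACK_* value; a minimal sketch of testing it:

#include <stdint.h>

/* Minimal sketch: is loopback 'mode' (an MC_CMD_LOOPBACK_* value)
 * available at a given speed? 'lo'/'hi' are that speed's two dwords,
 * e.g. from ..._OUT_10G_LO_OFST and ..._OUT_10G_HI_OFST. */
static int loopback_supported(uint32_t lo, uint32_t hi, unsigned int mode)
{
	uint64_t mask = ((uint64_t)hi << 32) | lo;

	return (int)((mask >> mode) & 1);
}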
/***********************************/
/* MC_CMD_GET_LINK
- * Read the unified MAC/PHY link state.
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
*/
#define MC_CMD_GET_LINK 0x29
@@ -1412,9 +2265,15 @@
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
@@ -1427,10 +2286,14 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-#define MC_CMD_FCNTL_OFF 0x0 /* enum */
-#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
-#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
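
A short hedged sketch decoding the negotiated flow-control dword:

/* Minimal sketch: name the value read at MC_CMD_GET_LINK_OUT_FCNTL_OFST. */
static const char *fcntl_name(unsigned int fcntl)
{
	switch (fcntl) {
	case MC_CMD_FCNTL_OFF:		return "off";
	case MC_CMD_FCNTL_RESPOND:	return "respond only";
	case MC_CMD_FCNTL_BIDIR:	return "respond and generate";
	default:			return "unknown";
	}
}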
@@ -1444,13 +2307,16 @@
/***********************************/
/* MC_CMD_SET_LINK
- * Write the unified MAC/PHY link configuration.
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
*/
#define MC_CMD_SET_LINK 0x2a
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
@@ -1458,9 +2324,13 @@
#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means "choose any available
+ * speed".
+ */
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
/* MC_CMD_SET_LINK_OUT msgresponse */
@@ -1469,12 +2339,13 @@
/***********************************/
/* MC_CMD_SET_ID_LED
- * Set indentification LED state.
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_ID_LED 0x2b
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
#define MC_CMD_LED_OFF 0x0 /* enum */
#define MC_CMD_LED_ON 0x1 /* enum */
@@ -1486,12 +2357,15 @@
/***********************************/
/* MC_CMD_SET_MAC
- * Set MAC configuration.
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_MAC 0x2c
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
@@ -1504,10 +2378,14 @@
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -1515,12 +2393,18 @@
/***********************************/
/* MC_CMD_PHY_STATS
- * Get generic PHY statistics.
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumerate). Each value is represented
+ * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, then the
+ * statistics are DMAed to that (page-aligned) location. Locks required: None.
+ * Returns: 0, ETIME
*/
#define MC_CMD_PHY_STATS 0x2d
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1534,40 +2418,71 @@
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
-#define MC_CMD_OUI 0x0 /* enum */
-#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
-#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
-#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
-#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
-#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
-#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
-#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
-#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
-#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
-#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
-#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
-#define MC_CMD_PCS_BER 0xc /* enum */
-#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
-#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
-#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
-#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
-#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
-#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
-#define MC_CMD_AN_LINK_UP 0x13 /* enum */
-#define MC_CMD_AN_COMPLETE 0x14 /* enum */
-#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
-#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
-#define MC_CMD_PHY_NSTATS 0x17 /* enum */
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
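
With DMA_ADDR=0 the statistics come back in the response itself; a minimal sketch of indexing them by the enum values above:

#include <stdint.h>

/* Minimal sketch: read one statistic from the NO_DMA response, which is
 * an array of 32-bit values indexed by the MC_CMD_* stat enums above.
 * Little-endian host assumed. */
static uint32_t phy_stat(const uint32_t *stats, unsigned int index)
{
	return index < MC_CMD_PHY_NSTATS ? stats[index] : 0;
}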
/***********************************/
/* MC_CMD_MAC_STATS
- * Get generic MAC statistics.
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
+ * Locks required: None. Returns: 0, ETIME
*/
#define MC_CMD_MAC_STATS 0x2e
/* MC_CMD_MAC_STATS_IN msgrequest */
#define MC_CMD_MAC_STATS_IN_LEN 16
+/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1684,6 +2599,7 @@
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
@@ -1713,7 +2629,23 @@
/***********************************/
/* MC_CMD_MEMCPY
- * Perform memory copy operation.
+ * DMA write data into (Rid,Addr), either by DMA reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * NUM_RECORDS field, which is ignored in all but the first record).
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
*/
#define MC_CMD_MEMCPY 0x31
@@ -1721,6 +2653,7 @@
#define MC_CMD_MEMCPY_IN_LENMIN 32
#define MC_CMD_MEMCPY_IN_LENMAX 224
#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
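
A hedged sketch (an illustration, not a guaranteed rule) of sizing such a multi-record request:

#include <stddef.h>

/* Minimal sketch: records are laid end to end, 32 bytes each; inline
 * source data referenced via FROM_RID=RID_INLINE must also fit inside
 * the payload, after the records. */
static size_t memcpy_req_len(unsigned int num_records, size_t inline_bytes)
{
	return MC_CMD_MEMCPY_IN_LEN(num_records) + inline_bytes;
}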
@@ -1741,14 +2674,22 @@
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
-#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
-#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
-#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
-#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
-#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
-#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
-#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -1818,7 +2759,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_REMOVE
- * Remove a WoL filter.
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
@@ -1832,7 +2773,8 @@
/***********************************/
/* MC_CMD_WOL_FILTER_RESET
- * Reset (i.e. remove all) WoL filters.
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
@@ -1848,7 +2790,7 @@
/***********************************/
/* MC_CMD_SET_MCAST_HASH
- * Set the MCASH hash value.
+ * Set the MCAST hash value without otherwise reconfiguring the MAC.
*/
#define MC_CMD_SET_MCAST_HASH 0x35
@@ -1865,7 +2807,8 @@
/***********************************/
/* MC_CMD_NVRAM_TYPES
- * Get virtual NVRAM partitions information.
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
*/
#define MC_CMD_NVRAM_TYPES 0x36
@@ -1874,26 +2817,54 @@
/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
-#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
-#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
-#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
/***********************************/
/* MC_CMD_NVRAM_INFO
- * Read info about a virtual NVRAM partition.
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
*/
#define MC_CMD_NVRAM_INFO 0x37
@@ -1913,13 +2884,19 @@
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
- * Start a group of update operations on a virtual NVRAM partition.
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
@@ -1935,7 +2912,9 @@
/***********************************/
/* MC_CMD_NVRAM_READ
- * Read data from a virtual NVRAM partition.
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_READ 0x39
@@ -1945,6 +2924,7 @@
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
/* MC_CMD_NVRAM_READ_OUT msgresponse */
@@ -1959,7 +2939,9 @@
/***********************************/
/* MC_CMD_NVRAM_WRITE
- * Write data to a virtual NVRAM partition.
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_WRITE 0x3a
@@ -1983,7 +2965,9 @@
/***********************************/
/* MC_CMD_NVRAM_ERASE
- * Erase sector(s) from a virtual NVRAM partition.
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_ERASE 0x3b
@@ -2001,7 +2985,9 @@
/***********************************/
/* MC_CMD_NVRAM_UPDATE_FINISH
- * Finish a group of update operations on a virtual NVRAM partition.
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
@@ -2019,6 +3005,20 @@
/***********************************/
/* MC_CMD_REBOOT
* Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production MC firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE. Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
*/
#define MC_CMD_REBOOT 0x3d
@@ -2033,7 +3033,9 @@
/***********************************/
/* MC_CMD_SCHEDINFO
- * Request scheduler info.
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice,maximum overrun), one for each thread, in ascending order of
+ * thread address.
*/
#define MC_CMD_SCHEDINFO 0x3e
@@ -2052,14 +3054,24 @@
/***********************************/
/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
*/
#define MC_CMD_REBOOT_MODE 0x3f
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: Snapper fake POR. */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
@@ -2069,32 +3081,145 @@
/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None. Returns: 0
*/
#define MC_CMD_SENSOR_INFO 0x41
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
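
The two-range scheme described above maps a reading to a state roughly as in this hedged sketch (the MIN/MAX limit fields appear in MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF below):

/* Minimal sketch of the documented value-to-state mapping: an unused
 * second range has equal limits; outside range 2 is FATAL, outside
 * range 1 but inside range 2 is WARNING. */
static int sensor_state(int v, int min1, int max1, int min2, int max2)
{
	int two_ranges = (min2 != max2);

	if (two_ranges && (v < min2 || v > max2))
		return MC_CMD_SENSOR_STATE_FATAL;
	if (v < min1 || v > max1)
		return two_ranges ? MC_CMD_SENSOR_STATE_WARNING
				  : MC_CMD_SENSOR_STATE_FATAL;
	return MC_CMD_SENSOR_STATE_OK;
}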
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
-#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
-#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
-#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
-#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
-#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
-#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
-#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
-#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
-#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
-#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
-#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
-#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
-#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
-#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */
-#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
@@ -2102,6 +3227,23 @@
#define MC_CMD_SENSOR_ENTRY_MINNUM 1
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
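A hedged sketch of the paged iteration this enables ('get_sensor_page()' is a hypothetical helper that issues the command with the given PAGE and returns the response mask):

#include <stdint.h>

extern uint32_t get_sensor_page(uint32_t page);	/* hypothetical helper */

/* Minimal sketch: walk every sensor page. Bit 31 (NEXT_PAGE) of each
 * returned mask is a continuation flag, not a sensor. */
static void walk_sensor_pages(void)
{
	uint32_t page = 0, mask;

	do {
		mask = get_sensor_page(page++);
		/* ... decode bits 0..30 of 'mask' for this page ... */
	} while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
}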
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
@@ -2124,39 +3266,80 @@
/***********************************/
/* MC_CMD_READ_SENSORS
- * Returns the current reading from each sensor.
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is an
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor reading require more space than the LENGTH allows, then return
+ * EINVAL.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
*/
#define MC_CMD_READ_SENSORS 0x42
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
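+/* Usage sketch (editor's illustration, not part of the protocol definition):
+ * issuing the extended form of the request with an explicit buffer LENGTH.
+ * MCDI_DECLARE_BUF(), MCDI_SET_QWORD(), MCDI_SET_DWORD() and efx_mcdi_rpc()
+ * are assumed to be the helpers declared in mcdi.h:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
+ *
+ *	MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR, dma_addr);
+ *	MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, buf_len);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ *
+ * where dma_addr/buf_len describe a 4Kbyte-aligned host buffer with room for
+ * one MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword per implemented sensor.
+ */
+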
/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
-#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
-#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
-#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
-#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
-#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
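+
+/* Decoding sketch (editor's illustration; the helper names are hypothetical).
+ * Extracts the three fields of one sensor reading dword directly from the
+ * LBN/WIDTH definitions above: VALUE in bits 0-15, STATE in bits 16-23 and
+ * TYPE in bits 24-31. Assumes the kernel's u8/u16/u32 types from
+ * <linux/types.h>.
+ */
+static inline u16 mcdi_sensor_reading_value(u32 entry)
+{
+	return entry & 0xffff;			/* VALUE: LBN 0, width 16 */
+}
+
+static inline u8 mcdi_sensor_reading_state(u32 entry)
+{
+	return (entry >> 16) & 0xff;		/* STATE: LBN 16, width 8 */
+}
+
+static inline u8 mcdi_sensor_reading_type(u32 entry)
+{
+	return (entry >> 24) & 0xff;		/* TYPE: LBN 24, width 8 */
+}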
/***********************************/
/* MC_CMD_GET_PHY_STATE
- * Report current state of PHY.
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
*/
#define MC_CMD_GET_PHY_STATE 0x43
@@ -2166,13 +3349,16 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
-#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
-#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
/***********************************/
/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control.
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
*/
#define MC_CMD_SETUP_8021QBB 0x44
@@ -2187,7 +3373,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_GET
- * Retrieve ID of any WoL filters.
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
#define MC_CMD_WOL_FILTER_GET 0x45
@@ -2201,7 +3387,8 @@
/***********************************/
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state.
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
@@ -2241,7 +3428,8 @@
/***********************************/
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state.
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
@@ -2256,7 +3444,7 @@
/***********************************/
/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset.
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
*/
#define MC_CMD_MAC_RESET_RESTORE 0x48
@@ -2269,6 +3457,9 @@
/***********************************/
/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None. Returns: 0
*/
#define MC_CMD_TESTASSERT 0x49
@@ -2281,14 +3472,23 @@
/***********************************/
/* MC_CMD_WORKAROUND
- * Enable/Disable a given workaround.
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
*/
#define MC_CMD_WORKAROUND 0x4a
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
-#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
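+/* Usage sketch (editor's illustration, not part of the protocol definition):
+ * enabling a workaround, with the MCDI helper macros and efx_mcdi_rpc()
+ * assumed to be those declared in mcdi.h:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG35388);
+ *	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ *	if (rc == -EINVAL)
+ *		rc = 0;
+ *
+ * where -EINVAL only means the firmware does not understand this workaround
+ * number and, per the description above, need not be a hard error.
+ */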
@@ -2297,7 +3497,12 @@
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
- * Read media-specific data from PHY.
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
@@ -2309,6 +3514,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
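+/* Usage sketch (editor's illustration): reading the first 128-byte page of
+ * an SFP+ module's ID data. The PAGE input field is elided from this hunk
+ * and its name is assumed; helper macros as in the sketches above:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ *
+ *	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+ *			  inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *
+ * On success, the DATALEN dword gives the number of valid DATA bytes.
+ */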
@@ -2318,7 +3524,8 @@
/***********************************/
/* MC_CMD_NVRAM_TEST
- * Test a particular NVRAM partition.
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
*/
#define MC_CMD_NVRAM_TEST 0x4c
@@ -2331,22 +3538,31 @@
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
-#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
-#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
/***********************************/
/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver.
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
*/
#define MC_CMD_MRSFP_TWEAK 0x4d
/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
@@ -2354,16 +3570,23 @@
/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
/***********************************/
/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits.
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
@@ -2372,9 +3595,13 @@
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
@@ -2396,9 +3623,3640 @@
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
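+/* Usage sketch (editor's illustration): walking the variable-length response.
+ * MCDI_DWORD() and MCDI_ARRAY_DWORD() are assumed to be the accessor macros
+ * declared in mcdi.h:
+ *
+ *	nparts = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+ *	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(nparts))
+ *		return -EIO;
+ *	for (i = 0; i < nparts; i++)
+ *		type = MCDI_ARRAY_DWORD(outbuf,
+ *					NVRAM_PARTITIONS_OUT_TYPE_ID, i);
+ */
+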
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
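+/* Parsing sketch (editor's illustration): SUBTYPE, VERSION and DESCRIPTION
+ * are only meaningful when the corresponding _VALID flag is set in FLAGS.
+ * MCDI_DWORD() and MCDI_WORD() are assumed to be the accessor macros
+ * declared in mcdi.h:
+ *
+ *	flags = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS);
+ *	if (flags & (1 << MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN))
+ *		snprintf(buf, len, "%u.%u.%u.%u",
+ *			 MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_W),
+ *			 MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_X),
+ *			 MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Y),
+ *			 MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Z));
+ */
+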
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function.
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
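+/* Derivation sketch (editor's illustration; the helper name is hypothetical).
+ * The n-th MAC address owned by the function is the 6-byte base address,
+ * interpreted as a 48-bit big-endian integer, plus n * MAC_STRIDE. Assumes
+ * the kernel's u8/u32/u64 types from <linux/types.h>.
+ */
+static inline void mcdi_nth_mac_addr(const u8 *base, u32 stride, u32 n,
+				     u8 *mac)
+{
+	u64 addr = 0;
+	int i;
+
+	for (i = 0; i < 6; i++)			/* big-endian 48-bit value */
+		addr = (addr << 8) | base[i];
+	addr += (u64)n * stride;
+	for (i = 5; i >= 0; i--) {
+		mac[i] = addr & 0xff;
+		addr >>= 8;
+	}
+}
+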
/* MC_CMD_RESOURCE_SPECIFIER enum */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Rx packet count */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Tx packet count */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Rx and Tx packet count */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if the INTERRUPTING flag was set */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
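+/* Usage sketch (editor's illustration): building the variable-length tail of
+ * an INIT_EVQ request, one 64-bit address per 4k page backing the queue.
+ * MCDI_SET_ARRAY_QWORD() is assumed to be the array setter declared in
+ * mcdi.h:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_EVQ_IN_LEN(npages));
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, nentries);
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, evq_index);
+ *	for (i = 0; i < npages; i++)
+ *		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i,
+ *				     dma_addr[i]);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ,
+ *			  inbuf, MC_CMD_INIT_EVQ_IN_LEN(npages),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ */
+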
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+
+/* MC_CMD_INIT_TXQ_IN msgrequest */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down
+ * first, or the operation will fail with EBUSY.
+ */
+#define MC_CMD_FINI_EVQ 0x83
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to port 0 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to port 1 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
+ * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
+ * a valid handle.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+
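+/* Usage sketch (editor's illustration): inserting a single-recipient filter
+ * that matches on destination MAC and delivers to one host RX queue. Helper
+ * macros as in the earlier sketches; MCDI_PTR() is assumed to return a
+ * pointer into the request buffer:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_INSERT);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ *		       1 << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN);
+ *	memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_MAC), mac_addr, ETH_ALEN);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ *		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, rxq);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ *		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ *		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ *
+ * On success, the 64-bit HANDLE in the response identifies the filter for a
+ * later REMOVE operation.
+ */
+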
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
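+/* Usage sketch (editor's illustration): discovering which MATCH_FIELDS
+ * combinations the firmware's MC_CMD_FILTER_OP will accept.
+ * MCDI_VAR_ARRAY_LEN() is assumed to be the helper from mcdi.h that derives
+ * an array element count from the actual response length:
+ *
+ *	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ *		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ *			  inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	nmatches = MCDI_VAR_ARRAY_LEN(outlen,
+ *			GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+ */
+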
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address or LUE index */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before SRIOV stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before SRIOV stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated
+ * to this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
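+
+/* Usage sketch (illustrative only): sending one IMEM data chunk, assuming
+ * the driver's MCDI helpers; the caller is responsible for the
+ * RESET/IMEMS/VECTORS/READY phase sequencing described above and for
+ * ending each download with a CHUNK_ID_LAST chunk carrying the checksum:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(words));
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_PHASE,
+ *		       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS);
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_TARGET,
+ *		       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT);
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_ID, chunk_id);
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_LEN, 4 * words);
+ *	memcpy(MCDI_PTR(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_DATA),
+ *	       chunk, 4 * words);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SATELLITE_DOWNLOAD,
+ *			  inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ */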
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
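+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers:
+ * individual capability bits in FLAGS1 are tested against their _LBN
+ * values, e.g.
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	flags1 = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ *	tso_supported = flags1 &
+ *		(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN);
+ */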
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
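+
+/* Layout sketch (illustrative only): for an extended command the v1 header
+ * carries MC_CMD_V2_EXTN, and this structure forms a second header dword
+ * that can be built with the bitfield helpers from bitfield.h, e.g.
+ *
+ *	EFX_POPULATE_DWORD_2(hdr[1],
+ *			     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+ *			     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+ */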
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
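+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_TCM_BUCKET_INIT_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, TCM_BUCKET_INIT_IN_BUCKET, bucket);
+ *	MCDI_SET_DWORD(inbuf, TCM_BUCKET_INIT_IN_RATE, rate_mbps);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_TCM_BUCKET_INIT, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */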
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise a txq in the pacer with given options, or set its options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already-reserved bucket (typically set to the bucket associated with the
+ * outer vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already-reserved bucket (typically set to the bucket associated with the
+ * inner vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to support. */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
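+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers:
+ * creating a VEB v-switch on a port, with automatic v-port creation and no
+ * VLAN tags, might look like
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE,
+ *		       MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+ *	MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
+ *			      VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 1);
+ *	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */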
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to insert/remove. */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
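+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers:
+ * allocating a normal v-port carrying a single VLAN tag might look like
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE,
+ *		       MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL);
+ *	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS, 1);
+ *	MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+ *			      VPORT_ALLOC_IN_VLAN_TAG_0, vlan_id);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	vport_id = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+ */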
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+/* The port to connect to the v-adaptor's upstream port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
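+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers:
+ * the PF/VF pair is packed into the FUNCTION dword using the field
+ * definitions above, e.g.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+ *	MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+ *			      EVB_PORT_ASSIGN_IN_PF, pf_index,
+ *			      EVB_PORT_ASSIGN_IN_VF, vf_index);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */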
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
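+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers:
+ * allocating an exclusive context spanning this function's RX queues might
+ * look like
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ *		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, n_rx_queues);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	ctx = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ */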
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
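+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers:
+ * programming the hash key and a round-robin indirection table for an
+ * exclusive context might look like
+ *
+ *	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ *	MCDI_DECLARE_BUF(tblbuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, ctx);
+ *	memcpy(MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
+ *	       rx_hash_key, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY,
+ *			  keybuf, sizeof(keybuf), NULL, 0, NULL);
+ *
+ *	MCDI_SET_DWORD(tblbuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, ctx);
+ *	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
+ *	     i++)
+ *		MCDI_PTR(tblbuf,
+ *			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ *			i % n_rx_queues;
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE,
+ *			  tblbuf, sizeof(tblbuf), NULL, 0, NULL);
+ */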
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
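+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers,
+ * and letting the MC pick a suitable vector base:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_VECTOR_CFG_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_VECTOR_CFG_IN_VEC_BASE,
+ *		       MC_CMD_RESOURCE_INSTANCE_ANY);
+ *	MCDI_SET_DWORD(inbuf, SET_VECTOR_CFG_IN_VECS_PER_PF, vecs_per_pf);
+ *	MCDI_SET_DWORD(inbuf, SET_VECTOR_CFG_IN_VECS_PER_VF, vecs_per_vf);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_VECTOR_CFG, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */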
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_STATS
+ * Retrieve rmon rx class statistics
+ */
+#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
+
+/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_CLASS_STATS
+ * Retrieve rmon tx class statistics
+ */
+#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+
+/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
+ * Retrieve rmon rx super_class statistics
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
+ * Retrieve rmon tx super_class statistics
+ */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_CLASS
+ * Allocate an rmon class
+ */
+#define MC_CMD_RMON_ALLOC_CLASS 0xca
+
+/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_CLASS
+ * Deallocate an rmon class
+ */
+#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
+
+/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
+/* class */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS
+ * Allocate an rmon super_class
+ */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
+ * Deallocate an rmon tx super_class
+ */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
+/* super_class */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_UP_CONV_STATS
+ * Retrieve up converter statistics
+ */
+#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPI_STATS
+ * Retrieve rx ipi stats
+ */
+#define MC_CMD_RMON_RX_IPI_STATS 0xcf
+
+/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve rx ipsec cntxt_ptr indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
+ * Retrieve rx ipsec port indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
+ * Retrieve rx ipsec overflow stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
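+
+/* Usage sketch (illustrative only), assuming the driver's MCDI helpers and
+ * ETH_ALEN from <linux/if_ether.h>:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, vport_id);
+ *	memcpy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR),
+ *	       mac_addr, ETH_ALEN);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */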
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries and does not use chunk handles. All entries must be in
+ * range and used for queue page mapping, although the latter restriction
+ * may be lifted in the future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
+ * Retrieve rx class drop stats
+ */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
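+
+/* Editorial sketch, not part of this patch: the RMON *_STATS_IN requests
+ * in this file all take a single FLAGS dword built from the LBN/WIDTH
+ * subfields. Assuming the MCDI_POPULATE_DWORD_* helpers from mcdi.h, a
+ * request for the drop stats of one RX class (with counter reset) might
+ * look like this:
+ */
+#if 0 /* illustration only */
+static int example_rmon_rx_class_drops(struct efx_nic *efx,
+				       unsigned int rx_class)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX);
+	size_t outlen;
+
+	MCDI_POPULATE_DWORD_2(inbuf, RMON_RX_CLASS_DROPS_STATS_IN_FLAGS,
+			      RMON_RX_CLASS_DROPS_STATS_IN_CLASS, rx_class,
+			      RMON_RX_CLASS_DROPS_STATS_IN_RST, 1);
+	return efx_mcdi_rpc(efx, MC_CMD_RMON_RX_CLASS_DROPS_STATS,
+			    inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+			    &outlen);
+}
+#endif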
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
+ * Retrieve rx super class drop stats
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ERRORS_STATS
+ * Retrieve rxdp errors
+ */
+#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_OVERFLOW_STATS
+ * Retrieve rxdp overflow
+ */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPI_STATS
+ * Retrieve tx ipi stats
+ */
+#define MC_CMD_RMON_TX_IPI_STATS 0xd7
+
+/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve tx ipsec counters by cntxt_ptr
+ */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
+ * Retrieve tx ipsec counters by port
+ */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
+ * Retrieve tx ipsec overflow
+ */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_STATS
+ * Retrieve tx nowhere stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
+ * Retrieve tx nowhere qbb stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ERRORS_STATS
+ * Retrieve txdp errors
+ */
+#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_OVERFLOW_STATS
+ * Retrieve txdp overflow
+ */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_CLASS_STATS
+ * Explicitly collect class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
+ * Explicitly collect super class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect super class stats
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
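+
+/* Editorial sketch, not part of this patch: reading the clocks is a
+ * zero-length request with a fixed-length response, assuming the mcdi.h
+ * helpers used elsewhere in this series.
+ */
+#if 0 /* illustration only */
+static int example_get_sys_clock_mhz(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	return rc ? rc : (int)MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+}
+#endif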
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 12
+/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 12
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
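+
+/* Editorial sketch, not part of this patch: because a zero field means
+ * "leave unchanged", a caller can adjust one clock in isolation:
+ */
+#if 0 /* illustration only */
+static int example_set_sys_clock(struct efx_nic *efx, u32 sys_mhz)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLOCK_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_CLOCK_OUT_LEN);
+
+	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_SYS_FREQ, sys_mhz);
+	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_ICORE_FREQ, 0);	/* unchanged */
+	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_DPCPU_FREQ, 0);	/* unchanged */
+	return efx_mcdi_rpc(efx, MC_CMD_SET_CLOCK, inbuf, sizeof(inbuf),
+			    outbuf, sizeof(outbuf), NULL);
+}
+#endif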
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* First 8 bits [39:32] of DATA are consumed by the MC-DPCPU protocol and
+ * must be initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
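+
+/* Editorial sketch, not part of this patch: a minimal request, assuming
+ * the mcdi.h helpers.
+ */
+#if 0 /* illustration only */
+static int example_trigger_interrupt(struct efx_nic *efx, u32 level)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+			    inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+#endif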
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, e.g. voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
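+
+/* Editorial sketch, not part of this patch: */
+#if 0 /* illustration only */
+static int example_get_pf_number(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	return rc ? rc : (int)MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+}
+#endif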
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, the chip enters
+ * quiescent mode, and the calling function gets exclusive MCDI ownership.
+ * The only way out is reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_START_KR_EYE_PLOT
+ * Start KR Serdes Eye diagram plot on a given lane. The lane must have a
+ * valid signal.
+ */
+#define MC_CMD_START_KR_EYE_PLOT 0xee
+
+/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
+#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
+
+/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_KR_EYE_PLOT
+ * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
+ * should call this command repeatedly after starting the eye plot, until
+ * no more data is returned.
+ */
+#define MC_CMD_POLL_KR_EYE_PLOT 0xef
+
+/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
+
+/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
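+
+/* Editorial sketch, not part of this patch: the polling loop described
+ * above, assuming the plot was started with MC_CMD_START_KR_EYE_PLOT:
+ */
+#if 0 /* illustration only */
+static int example_drain_eye_plot(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX);
+	size_t outlen;
+	int rc;
+
+	do {
+		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_KR_EYE_PLOT, NULL, 0,
+				  outbuf, sizeof(outbuf), &outlen);
+		/* each successful poll returns one row of BER samples */
+	} while (rc == 0 && outlen > 0);
+	return rc;
+}
+#endif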
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device's One-Time-Programmable (OTP) fuses.
+ */
+#define MC_CMD_READ_FUSES 0xf0
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
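+
+/* Editorial sketch, not part of this patch: a bounded OTP read; note the
+ * response carries its own LENGTH field ahead of the byte array.
+ */
+#if 0 /* illustration only */
+static int example_read_fuses(struct efx_nic *efx, u32 offset,
+			      u8 *data, u32 len)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LENMAX);
+	size_t outlen;
+	int rc;
+
+	if (len > MC_CMD_READ_FUSES_OUT_DATA_MAXNUM)
+		return -EINVAL;
+	MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset);
+	MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, len);
+	rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH) < len)
+		return -EIO;
+	memcpy(data, MCDI_PTR(outbuf, READ_FUSES_OUT_DATA), len);
+	return 0;
+}
+#endif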
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
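+
+/* Editorial sketch, not part of this patch: each PARAM word above packs
+ * ID/LANE/AUTOCAL/INITIAL/CURRENT subfields; plain shift-and-mask on the
+ * LBN/WIDTH values decodes one word.
+ */
+#if 0 /* illustration only */
+static inline unsigned int example_kr_rxeq_param_current(u32 param)
+{
+	return (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN) &
+	       ((1u << MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH) - 1);
+}
+#endif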
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ */
+#define MC_CMD_LICENSING 0xf3
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
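+
+/* Editorial sketch, not part of this patch: a GET_KEY_STATS query, which
+ * unlike OP_UPDATE_LICENSE returns the full 28-byte response.
+ */
+#if 0 /* illustration only */
+static int example_count_valid_keys(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,
+		       MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), NULL);
+	return rc ? rc : (int)MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS);
+}
+#endif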
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 13cb40fe90c1..7b6be61d549f 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -36,7 +36,7 @@ struct efx_mcdi_phy_data {
static int
efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
- u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
size_t outlen;
int rc;
@@ -78,7 +78,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode,
u32 loopback_speed)
{
- u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -102,7 +102,7 @@ fail:
static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
- u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
size_t outlen;
int rc;
@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
if (rc)
goto fail;
- if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
rc = -EIO;
goto fail;
}
@@ -125,16 +126,16 @@ fail:
return rc;
}
-int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 *value_out, u32 *status_out)
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
{
- u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
@@ -144,25 +145,27 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
- *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
- return 0;
+ if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
-int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 value, u32 *status_out)
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
{
- u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
@@ -173,7 +176,10 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
+ if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
return 0;
fail:
@@ -304,10 +310,37 @@ static u32 mcdi_to_ethtool_media(u32 media)
}
}
+static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
u32 caps;
int rc;
@@ -403,7 +436,7 @@ fail:
return rc;
}
-int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
@@ -414,37 +447,10 @@ int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
efx->loopback_mode, 0);
}
-void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl)
-{
- switch (fcntl) {
- case MC_CMD_FCNTL_AUTO:
- WARN_ON(1); /* This is not a link mode */
- link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_BIDIR:
- link_state->fc = EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_RESPOND:
- link_state->fc = EFX_FC_RX;
- break;
- default:
- WARN_ON(1);
- case MC_CMD_FCNTL_OFF:
- link_state->fc = 0;
- break;
- }
-
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- link_state->speed = speed;
-}
-
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
-void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
@@ -472,7 +478,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
static bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
@@ -507,7 +513,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
ecmd->supported =
@@ -550,6 +556,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
default: return -EINVAL;
}
} else {
@@ -579,7 +586,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
- u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
size_t outlen;
int rc;
@@ -615,17 +622,15 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
unsigned int retry, i, count = 0;
size_t outlen;
u32 status;
- u8 *buf, *ptr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
int rc;
- buf = kzalloc(0x100, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
- MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+ inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
if (rc)
goto out;
@@ -633,11 +638,11 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
for (retry = 0; retry < 100; ++retry) {
BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
- buf, 0x100, &outlen);
+ outbuf, sizeof(outbuf), &outlen);
if (rc)
goto out;
- status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
if (status != MC_CMD_POLL_BIST_RUNNING)
goto finished;
@@ -654,7 +659,7 @@ finished:
if (efx->phy_type == PHY_TYPE_SFT9001B &&
(bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
- ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
if (status == MC_CMD_POLL_BIST_PASSED &&
outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
for (i = 0; i < 8; i++) {
@@ -668,8 +673,6 @@ finished:
rc = count;
out:
- kfree(buf);
-
return rc;
}
@@ -744,8 +747,8 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
struct ethtool_eeprom *ee, u8 *data)
{
- u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
- u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
size_t outlen;
int rc;
unsigned int payload_len;
@@ -785,8 +788,7 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
space_remaining : payload_len;
memcpy(user_data,
- outbuf + page_off +
- MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
+ MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
to_copy);
space_remaining -= to_copy;
@@ -813,10 +815,10 @@ static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
}
}
-const struct efx_phy_operations efx_mcdi_phy_ops = {
+static const struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
- .reconfigure = efx_mcdi_phy_reconfigure,
+ .reconfigure = efx_mcdi_port_reconfigure,
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
@@ -828,3 +830,200 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
.get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
.get_module_info = efx_mcdi_phy_get_module_info,
};
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ return phy_data->supported_cap;
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr, ETH_ALEN);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
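+
+/* The fcntl selection above resolves in precedence order: fc_disable
+ * forces MC_CMD_FCNTL_OFF; otherwise EFX_FC_AUTO selects
+ * MC_CMD_FCNTL_AUTO; otherwise RX|TX maps to BIDIR, RX alone to
+ * RESPOND, and anything else (including TX alone) to OFF.
+ */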
+
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ size_t outlength;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return true;
+ }
+
+ return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
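+
+/* Design note: if the GET_LINK request itself fails we report a fault
+ * (return true), so callers fail safe while the MC is unresponsive.
+ */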
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+ u32 dma_len, int enable, int clear)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ int rc;
+ int period = enable ? 1000 : 0;
+
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, !!enable,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+ MAC_STATS_IN_PERIOD_MS, period);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+ __func__, enable ? "enable" : "disable", rc);
+ return rc;
+}
+
+void efx_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
+ MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+}
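+
+/* Setting GENERATION_END to EFX_MC_STATS_GENERATION_INVALID before
+ * enabling DMA lets a stats reader tell whether the MC has written a
+ * complete dump yet. Minimal consumer sketch (illustrative only; the
+ * real readers live in the NIC-specific stats code):
+ *
+ *	generation = dma_stats[MC_CMD_MAC_GENERATION_END];
+ *	if (generation == EFX_MC_STATS_GENERATION_INVALID)
+ *		return;		(no dump written yet)
+ *	rmb();			(read the marker before the stats)
+ *	... copy the MC_CMD_MAC_NSTATS entries ...
+ */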
+
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+}
+
+int efx_mcdi_port_probe(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Hook in PHY operations table */
+ efx->phy_op = &efx_mcdi_phy_ops;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = efx_mcdi_mdio_read;
+ efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+
+ return 0;
+}
+
+void efx_mcdi_port_remove(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Get physical port number (EF10 only; on Siena it is the same as the PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 9acfd6696ffb..8ff954c59efa 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de99..16824fecc5ee 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 08f825b71ac8..a77a8bd2dd70 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -1,194 +1,32 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
-#include "spi.h"
#include "efx.h"
-#include "nic.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-#define EFX_SPI_VERIFY_BUF_LEN 16
-
-struct efx_mtd_partition {
- struct mtd_info mtd;
- union {
- struct {
- bool updating;
- u8 nvram_type;
- u16 fw_subtype;
- } mcdi;
- size_t offset;
- };
- const char *type_name;
- char name[IFNAMSIZ + 20];
-};
-
-struct efx_mtd_ops {
- int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, u8 *buffer);
- int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
- int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, const u8 *buffer);
- int (*sync)(struct mtd_info *mtd);
-};
-
-struct efx_mtd {
- struct list_head node;
- struct efx_nic *efx;
- const struct efx_spi_device *spi;
- const char *name;
- const struct efx_mtd_ops *ops;
- size_t n_parts;
- struct efx_mtd_partition part[0];
-};
-
-#define efx_for_each_partition(part, efx_mtd) \
- for ((part) = &(efx_mtd)->part[0]; \
- (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
- (part)++)
#define to_efx_mtd_partition(mtd) \
container_of(mtd, struct efx_mtd_partition, mtd)
-static int falcon_mtd_probe(struct efx_nic *efx);
-static int siena_mtd_probe(struct efx_nic *efx);
-
-/* SPI utilities */
-
-static int
-efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- u8 status;
- int rc, i;
-
- /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
- for (i = 0; i < 40; i++) {
- __set_current_state(uninterruptible ?
- TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (signal_pending(current))
- return -EINTR;
- }
- pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
- return -ETIMEDOUT;
-}
-
-static int
-efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
- SPI_STATUS_BP0);
- u8 status;
- int rc;
-
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
-
- if (!(status & unlock_mask))
- return 0; /* already unlocked */
-
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
- if (rc)
- return rc;
-
- status &= ~unlock_mask;
- rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
- NULL, sizeof(status));
- if (rc)
- return rc;
- rc = falcon_spi_wait_write(efx, spi);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int
-efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- unsigned pos, block_len;
- u8 empty[EFX_SPI_VERIFY_BUF_LEN];
- u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
- int rc;
-
- if (len != spi->erase_size)
- return -EINVAL;
-
- if (spi->erase_command == 0)
- return -EOPNOTSUPP;
-
- rc = efx_spi_unlock(efx, spi);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
- NULL, 0);
- if (rc)
- return rc;
- rc = efx_spi_slow_wait(part, false);
-
- /* Verify the entire region has been wiped */
- memset(empty, 0xff, sizeof(empty));
- for (pos = 0; pos < len; pos += block_len) {
- block_len = min(len - pos, sizeof(buffer));
- rc = falcon_spi_read(efx, spi, start + pos, block_len,
- NULL, buffer);
- if (rc)
- return rc;
- if (memcmp(empty, buffer, block_len))
- return -EIO;
-
- /* Avoid locking up the system */
- cond_resched();
- if (signal_pending(current))
- return -EINTR;
- }
-
- return rc;
-}
-
/* MTD interface */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
+ rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
@@ -202,13 +40,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
static void efx_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->sync(mtd);
+ rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
- part->name, efx_mtd->name, rc);
+ part->name, part->dev_type_name, rc);
}
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -222,62 +60,44 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
ssleep(1);
}
WARN_ON(rc);
+ list_del(&part->node);
}
-static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- efx_mtd_remove_partition(part);
- list_del(&efx_mtd->node);
- kfree(efx_mtd);
-}
-
-static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
- snprintf(part->name, sizeof(part->name),
- "%s %s:%02x", efx_mtd->efx->name,
- part->type_name, part->mcdi.fw_subtype);
- else
- snprintf(part->name, sizeof(part->name),
- "%s %s", efx_mtd->efx->name,
- part->type_name);
-}
-
-static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
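+/* efx_mtd_add() walks @parts with an explicit stride (@sizeof_part)
+ * rather than indexing a plain array, so callers may pass an array of
+ * larger NIC-specific structures embedding struct efx_mtd_partition
+ * as their first member. A hypothetical caller sketch (foo_partition
+ * is illustrative only):
+ *
+ *	struct foo_partition {
+ *		struct efx_mtd_partition common;
+ *		unsigned int nvram_type;
+ *	} *parts = kcalloc(n, sizeof(*parts), GFP_KERNEL);
+ *	...
+ *	efx_mtd_add(efx, &parts[0].common, n, sizeof(parts[0]));
+ */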
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
{
struct efx_mtd_partition *part;
+ size_t i;
- efx_mtd->efx = efx;
+ for (i = 0; i < n_parts; i++) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
- efx_mtd_rename_device(efx_mtd);
-
- efx_for_each_partition(part, efx_mtd) {
part->mtd.writesize = 1;
part->mtd.owner = THIS_MODULE;
- part->mtd.priv = efx_mtd;
+ part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = efx_mtd_erase;
- part->mtd._read = efx_mtd->ops->read;
- part->mtd._write = efx_mtd->ops->write;
+ part->mtd._read = efx->type->mtd_read;
+ part->mtd._write = efx->type->mtd_write;
part->mtd._sync = efx_mtd_sync;
+ efx->type->mtd_rename(part);
+
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
+
+ /* Add to list in order - efx_mtd_remove() depends on this */
+ list_add_tail(&part->node, &efx->mtd_list);
}
- list_add(&efx_mtd->node, &efx->mtd_list);
return 0;
fail:
- while (part != &efx_mtd->part[0]) {
- --part;
+ while (i--) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
efx_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
@@ -286,410 +106,28 @@ fail:
void efx_mtd_remove(struct efx_nic *efx)
{
- struct efx_mtd *efx_mtd, *next;
+ struct efx_mtd_partition *parts, *part, *next;
WARN_ON(efx_dev_registered(efx));
- list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
- efx_mtd_remove_device(efx_mtd);
-}
-
-void efx_mtd_rename(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
-
- ASSERT_RTNL();
-
- list_for_each_entry(efx_mtd, &efx->mtd_list, node)
- efx_mtd_rename_device(efx_mtd);
-}
-
-int efx_mtd_probe(struct efx_nic *efx)
-{
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- return siena_mtd_probe(efx);
- else
- return falcon_mtd_probe(efx);
-}
-
-/* Implementation of MTD operations for Falcon */
-
-static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_read(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = efx_spi_erase(part, part->offset + start, len);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_write(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- mutex_lock(&nic_data->spi_lock);
- rc = efx_spi_slow_wait(part, true);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static const struct efx_mtd_ops falcon_mtd_ops = {
- .read = falcon_mtd_read,
- .erase = falcon_mtd_erase,
- .write = falcon_mtd_write,
- .sync = falcon_mtd_sync,
-};
-
-static int falcon_mtd_probe(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- struct efx_spi_device *spi;
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
-
- ASSERT_RTNL();
-
- spi = &nic_data->spi_flash;
- if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "flash";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_NORFLASH;
- efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
- efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].type_name = "sfc_flash_bootrom";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- spi = &nic_data->spi_eeprom;
- if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "EEPROM";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_RAM;
- efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
- efx_mtd->part[0].mtd.size =
- min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
- EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].type_name = "sfc_bootconfig";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- return rc;
-}
-
-/* Implementation of MTD operations for Siena */
-
-static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk = part->mtd.erasesize;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- /* The MCDI interface can in fact do multiple erase blocks at once;
- * but erasing may be slow, so we make multiple calls here to avoid
- * tripping the MCDI RPC timeout. */
- while (offset < end) {
- rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
- chunk);
- if (rc)
- goto out;
- offset += chunk;
- }
-out:
- return rc;
-}
-
-static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- int rc = 0;
-
- if (part->mcdi.updating) {
- part->mcdi.updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
- }
-
- return rc;
-}
-
-static const struct efx_mtd_ops siena_mtd_ops = {
- .read = siena_mtd_read,
- .erase = siena_mtd_erase,
- .write = siena_mtd_write,
- .sync = siena_mtd_sync,
-};
-
-struct siena_nvram_type_info {
- int port;
- const char *name;
-};
+ if (list_empty(&efx->mtd_list))
+ return;
-static const struct siena_nvram_type_info siena_nvram_types[] = {
- [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
- [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
- [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
-};
+ parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+ node);
-static int siena_mtd_probe_partition(struct efx_nic *efx,
- struct efx_mtd *efx_mtd,
- unsigned int part_id,
- unsigned int type)
-{
- struct efx_mtd_partition *part = &efx_mtd->part[part_id];
- const struct siena_nvram_type_info *info;
- size_t size, erase_size;
- bool protected;
- int rc;
-
- if (type >= ARRAY_SIZE(siena_nvram_types) ||
- siena_nvram_types[type].name == NULL)
- return -ENODEV;
-
- info = &siena_nvram_types[type];
-
- if (info->port != efx_port_num(efx))
- return -ENODEV;
-
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
- if (rc)
- return rc;
- if (protected)
- return -ENODEV; /* hide it */
-
- part->mcdi.nvram_type = type;
- part->type_name = info->name;
-
- part->mtd.type = MTD_NORFLASH;
- part->mtd.flags = MTD_CAP_NORFLASH;
- part->mtd.size = size;
- part->mtd.erasesize = erase_size;
+ list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+ efx_mtd_remove_partition(part);
- return 0;
+ kfree(parts);
}
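+
+/* All partitions for a NIC are allocated as a single array, and
+ * efx_mtd_add() inserts them in order, so the first list entry is also
+ * the base of the allocation and one kfree() releases every partition.
+ */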
-static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
- struct efx_mtd *efx_mtd)
+void efx_mtd_rename(struct efx_nic *efx)
{
struct efx_mtd_partition *part;
- uint16_t fw_subtype_list[
- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
- int rc;
-
- rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
- if (rc)
- return rc;
-
- efx_for_each_partition(part, efx_mtd)
- part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
-
- return 0;
-}
-
-static int siena_mtd_probe(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
- u32 nvram_types;
- unsigned int type;
ASSERT_RTNL();
- rc = efx_mcdi_nvram_types(efx, &nvram_types);
- if (rc)
- return rc;
-
- efx_mtd = kzalloc(sizeof(*efx_mtd) +
- hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->name = "Siena NVRAM manager";
-
- efx_mtd->ops = &siena_mtd_ops;
-
- type = 0;
- efx_mtd->n_parts = 0;
-
- while (nvram_types != 0) {
- if (nvram_types & 1) {
- rc = siena_mtd_probe_partition(efx, efx_mtd,
- efx_mtd->n_parts, type);
- if (rc == 0)
- efx_mtd->n_parts++;
- else if (rc != -ENODEV)
- goto fail;
- }
- type++;
- nvram_types >>= 1;
- }
-
- rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
- if (rc)
- goto fail;
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
-fail:
- if (rc)
- kfree(efx_mtd);
- return rc;
+ list_for_each_entry(part, &efx->mtd_list, node)
+ efx->type->mtd_rename(part);
}
-
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f4c7e6b67743..b172ed133055 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -27,9 +27,11 @@
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
+#include <linux/mtd/mtd.h>
#include "enum.h"
#include "bitfield.h"
+#include "filter.h"
/**************************************************************************
*
@@ -37,7 +39,7 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "3.2"
+#define EFX_DRIVER_VERSION "4.0"
#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -93,21 +95,36 @@ struct efx_ptp_data;
struct efx_self_tests;
/**
- * struct efx_special_buffer - An Efx special buffer
- * @addr: CPU base address of the buffer
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
- * @index: Buffer index within controller;s buffer table
- * @entries: Number of buffer table entries
*
- * Special buffers are used for the event queues and the TX and RX
- * descriptor queues for each channel. They are *not* used for the
- * actual transmit and receive buffers.
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
*/
-struct efx_special_buffer {
+struct efx_buffer {
void *addr;
dma_addr_t dma_addr;
unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+ struct efx_buffer buf;
unsigned int index;
unsigned int entries;
};
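+
+/* Existing users of the flat layout now reach the mapping through
+ * @buf, e.g. channel->eventq.buf.addr rather than channel->eventq.addr
+ * (a sketch of the accessor change; callers are updated to match
+ * elsewhere in this patch).
+ */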
@@ -118,6 +135,7 @@ struct efx_special_buffer {
* freed when descriptor completes
* @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
* freed when descriptor completes.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -129,7 +147,10 @@ struct efx_tx_buffer {
const struct sk_buff *skb;
void *heap_buf;
};
- dma_addr_t dma_addr;
+ union {
+ efx_qword_t option;
+ dma_addr_t dma_addr;
+ };
unsigned short flags;
unsigned short len;
unsigned short unmap_len;
@@ -138,6 +159,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -169,6 +191,7 @@ struct efx_tx_buffer {
* variable indicates that the queue is empty. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
+ * @merge_events: Number of TX merged completion events
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -205,6 +228,7 @@ struct efx_tx_queue {
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
+ unsigned int merge_events;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -244,6 +268,7 @@ struct efx_rx_buffer {
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
#define EFX_RX_PKT_TCP 0x0040
+#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
/**
* struct efx_rx_page_state - Page-based rx buffer state
@@ -271,13 +296,14 @@ struct efx_rx_page_state {
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
- * @enabled: Receive queue enabled indicator.
+ * @refill_enabled: Enable refill whenever fill level is low
* @flush_pending: Set when a RX flush is pending. Has the same lifetime as
* @rxq_flush_pending.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
- * @scatter_n: Number of buffers used by current packet
+ * @scatter_n: Used by NIC-specific receive code.
+ * @scatter_len: Used by NIC-specific receive code.
* @page_ring: The ring to store DMA mapped pages for reuse.
* @page_add: Counter to calculate the write pointer for the recycle ring.
* @page_remove: Counter to calculate the read pointer for the recycle ring.
@@ -302,13 +328,14 @@ struct efx_rx_queue {
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
unsigned int ptr_mask;
- bool enabled;
+ bool refill_enabled;
bool flush_pending;
unsigned int added_count;
unsigned int notified_count;
unsigned int removed_count;
unsigned int scatter_n;
+ unsigned int scatter_len;
struct page **page_ring;
unsigned int page_add;
unsigned int page_remove;
@@ -325,22 +352,6 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-/**
- * struct efx_buffer - An Efx general-purpose buffer
- * @addr: host base address of the buffer
- * @dma_addr: DMA base address of the buffer
- * @len: Buffer length, in bytes
- *
- * The NIC uses these buffers for its interrupt status registers and
- * MAC stats dumps.
- */
-struct efx_buffer {
- void *addr;
- dma_addr_t dma_addr;
- unsigned int len;
-};
-
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -357,12 +368,12 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
- * @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
@@ -378,6 +389,8 @@ enum efx_rx_alloc_method {
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
* @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
* lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -389,12 +402,12 @@ struct efx_channel {
struct efx_nic *efx;
int channel;
const struct efx_channel_type *type;
+ bool eventq_init;
bool enabled;
int irq;
unsigned int irq_moderation;
struct net_device *napi_dev;
struct napi_struct napi_str;
- bool work_pending;
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
@@ -414,6 +427,8 @@ struct efx_channel {
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
unsigned int n_rx_nodesc_trunc;
+ unsigned int n_rx_merge_events;
+ unsigned int n_rx_merge_packets;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -423,6 +438,21 @@ struct efx_channel {
};
/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
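+
+/* An IRQ handler registered with a &struct efx_msi_context as dev_id
+ * can always dereference it safely, even across channel reallocation.
+ * Minimal sketch (foo_msi is illustrative, not a driver function):
+ *
+ *	static irqreturn_t foo_msi(int irq, void *dev_id)
+ *	{
+ *		struct efx_msi_context *context = dev_id;
+ *		struct efx_nic *efx = context->efx;
+ *		... use context->index to find the current channel ...
+ *		return IRQ_HANDLED;
+ *	}
+ */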
+
+/**
* struct efx_channel_type - distinguishes traffic and extra channels
* @handle_no_channel: Handle failure to allocate an extra channel
* @pre_probe: Set up extra state prior to initialisation
@@ -579,75 +609,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
return !!(mode & ~PHY_MODE_TX_DISABLED);
}
-/*
- * Efx extended statistics
- *
- * Not all statistics are provided by all supported MACs. The purpose
- * is this structure is to contain the raw statistics provided by each
- * MAC.
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ * it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
*/
-struct efx_mac_stats {
- u64 tx_bytes;
- u64 tx_good_bytes;
- u64 tx_bad_bytes;
- u64 tx_packets;
- u64 tx_bad;
- u64 tx_pause;
- u64 tx_control;
- u64 tx_unicast;
- u64 tx_multicast;
- u64 tx_broadcast;
- u64 tx_lt64;
- u64 tx_64;
- u64 tx_65_to_127;
- u64 tx_128_to_255;
- u64 tx_256_to_511;
- u64 tx_512_to_1023;
- u64 tx_1024_to_15xx;
- u64 tx_15xx_to_jumbo;
- u64 tx_gtjumbo;
- u64 tx_collision;
- u64 tx_single_collision;
- u64 tx_multiple_collision;
- u64 tx_excessive_collision;
- u64 tx_deferred;
- u64 tx_late_collision;
- u64 tx_excessive_deferred;
- u64 tx_non_tcpudp;
- u64 tx_mac_src_error;
- u64 tx_ip_src_error;
- u64 rx_bytes;
- u64 rx_good_bytes;
- u64 rx_bad_bytes;
- u64 rx_packets;
- u64 rx_good;
- u64 rx_bad;
- u64 rx_pause;
- u64 rx_control;
- u64 rx_unicast;
- u64 rx_multicast;
- u64 rx_broadcast;
- u64 rx_lt64;
- u64 rx_64;
- u64 rx_65_to_127;
- u64 rx_128_to_255;
- u64 rx_256_to_511;
- u64 rx_512_to_1023;
- u64 rx_1024_to_15xx;
- u64 rx_15xx_to_jumbo;
- u64 rx_gtjumbo;
- u64 rx_bad_lt64;
- u64 rx_bad_64_to_15xx;
- u64 rx_bad_15xx_to_jumbo;
- u64 rx_bad_gtjumbo;
- u64 rx_overflow;
- u64 rx_missed;
- u64 rx_false_carrier;
- u64 rx_symbol_error;
- u64 rx_align_error;
- u64 rx_length_error;
- u64 rx_internal_error;
- u64 rx_good_lt64;
+struct efx_hw_stat_desc {
+ const char *name;
+ u16 dma_width;
+ u16 offset;
};
/* Number of bits used in a multicast filter hash address */
@@ -662,7 +634,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_filter_state;
struct efx_vf;
struct vfdi_status;
@@ -672,7 +643,6 @@ struct vfdi_status;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
- * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
@@ -689,7 +659,7 @@ struct vfdi_status;
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
- * @channel_name: Names for channels and their IRQs
+ * @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
@@ -707,17 +677,25 @@ struct vfdi_status;
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
* for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, the IRQ handler
+ * will acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @irq_level: IRQ level/index for IRQs not triggered by an event queue
* @selftest_work: Work item for asynchronous self-test
* @mtd_list: List of MTDs attached to the NIC
* @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* efx_monitor() and efx_reconfigure_port()
* @port_enabled: Port enabled indicator.
@@ -737,8 +715,10 @@ struct vfdi_status;
* @link_advertising: Autonegotiation advertising flags
* @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state
- * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
- * @multicast_hash: Multicast hash table
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ * Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ * Protected by @mac_lock.
* @wanted_fc: Wanted flow control flags
* @fc_disable: When non-zero flow control is disabled. Typically used to
* ensure that network back pressure doesn't delay dma queue flushes.
@@ -747,7 +727,12 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
+ * @filter_lock: Filter table lock
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
+ * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
* @rxq_flush_outstanding: Count of number of RX flushes started but not yet
@@ -771,12 +756,8 @@ struct vfdi_status;
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
* field is used by efx_test_interrupts() to verify that an
* interrupt has occurred.
- * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
- * @mac_stats: MAC statistics. These include all statistics the MACs
- * can provide. Generic code converts these into a standard
- * &struct net_device_stats.
- * @stats_lock: Statistics update lock. Serialises statistics fetches
- * and access to @mac_stats.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ * efx_nic_type::{update,start,stop}_stats.
*
* This is stored in the private area of the &struct net_device.
*/
@@ -788,7 +769,6 @@ struct efx_nic {
unsigned int port_num;
const struct efx_nic_type *type;
int legacy_irq;
- bool legacy_irq_enabled;
bool eeh_disabled_legacy_irq;
struct workqueue_struct *workqueue;
char workqueue_name[16];
@@ -806,7 +786,7 @@ struct efx_nic {
unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS];
- char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
@@ -819,6 +799,8 @@ struct efx_nic {
unsigned rx_dc_base;
unsigned sram_lim_qw;
unsigned next_buffer_table;
+
+ unsigned int max_channels;
unsigned n_channels;
unsigned n_rx_channels;
unsigned rss_spread;
@@ -830,6 +812,9 @@ struct efx_nic {
unsigned int rx_page_buf_step;
unsigned int rx_bufs_per_page;
unsigned int rx_pages_per_batch;
+ unsigned int rx_prefix_size;
+ int rx_packet_hash_offset;
+ int rx_packet_len_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -837,6 +822,7 @@ struct efx_nic {
unsigned int_error_count;
unsigned long int_error_expire;
+ bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
unsigned irq_level;
@@ -847,6 +833,7 @@ struct efx_nic {
#endif
void *nic_data;
+ struct efx_mcdi_data *mcdi;
struct mutex mac_lock;
struct work_struct mac_work;
@@ -868,7 +855,7 @@ struct efx_nic {
struct efx_link_state link_state;
unsigned int n_link_state_changes;
- bool promiscuous;
+ bool unicast_filter;
union efx_multicast_hash multicast_hash;
u8 wanted_fc;
unsigned fc_disable;
@@ -879,9 +866,14 @@ struct efx_nic {
void *loopback_selftest;
- struct efx_filter_state *filter_state;
+ spinlock_t filter_lock;
+ void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned int rps_expire_index;
+#endif
- atomic_t drain_pending;
+ atomic_t active_queues;
atomic_t rxq_flush_pending;
atomic_t rxq_flush_outstanding;
wait_queue_head_t flush_wq;
@@ -907,8 +899,6 @@ struct efx_nic {
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
spinlock_t biu_lock;
int last_irq_cpu;
- unsigned n_rx_nodesc_drop_cnt;
- struct efx_mac_stats mac_stats;
spinlock_t stats_lock;
};
@@ -922,8 +912,17 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
return efx->port_num;
}
+struct efx_mtd_partition {
+ struct list_head node;
+ struct mtd_info mtd;
+ const char *dev_type_name;
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
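+
+/* The embedded &struct mtd_info is mapped back to its containing
+ * partition with container_of(); see to_efx_mtd_partition() in mtd.c.
+ */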
+
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
* @init: Initialise the controller
@@ -938,47 +937,118 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @probe_port: Probe the MAC and PHY
* @remove_port: Free resources allocated by probe_port()
* @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
* @prepare_flush: Prepare the hardware for flushing the DMA queues
- * @finish_flush: Clean up after flushing the DMA queues
- * @update_stats: Update statistics not provided by event handling
+ * (for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ * architecture)
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ * Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
* @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
* to the hardware. Serialised by the mac_lock.
* @check_mac_fault: Check MAC fault state. True if fault present.
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
+ * @test_chip: Test registers. May use efx_farch_test_registers(), and is
* expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
+ * @mcdi_request: Send an MCDI request with the given header and SDU.
+ * The SDU length may be any value from 0 up to the protocol-
+ * defined maximum, but its buffer will be padded to a multiple
+ * of 4 bytes.
+ * @mcdi_poll_response: Test whether an MCDI response is available.
+ * @mcdi_read_response: Read the MCDI response PDU. The offset will
+ * be a multiple of 4. The length may not be, but the buffer
+ * will be padded so it is safe to round up.
+ * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
+ * return an appropriate error code for aborting any current
+ * request; otherwise return 0.
+ * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
+ * be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
+ * queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
+ * a pointer to the &struct efx_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
+ * is a pointer to the &struct efx_nic.
+ * @tx_probe: Allocate resources for TX queue
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: Add or replace a filter
+ * @filter_remove_safe: Remove a filter by ID, carefully
+ * @filter_get_safe: Retrieve a filter by ID, carefully
+ * @filter_clear_rx: Remove RX filters by priority
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_insert: Add or replace a filter for RFS. This must be
+ * atomic. The hardware change may be asynchronous but should
+ * not be delayed for long. It may fail if this can't be done
+ * atomically.
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ * This must check whether the specified table entry is used by RFS
+ * and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ * using efx_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition. This
+ * also notifies the driver that a writer has finished using this
+ * partition.
* @revision: Hardware architecture revision
- * @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
* @buf_tbl_base: Buffer table base address
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
- * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
- * @phys_addr_channels: Number of channels with physically addressed
- * descriptors
* @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
+ * @mcdi_max_ver: Maximum MCDI version supported
*/
struct efx_nic_type {
+ unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
int (*init)(struct efx_nic *efx);
- void (*dimension_resources)(struct efx_nic *efx);
+ int (*dimension_resources)(struct efx_nic *efx);
void (*fini)(struct efx_nic *efx);
void (*monitor)(struct efx_nic *efx);
enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -987,14 +1057,18 @@ struct efx_nic_type {
int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx);
bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx);
- void (*update_stats)(struct efx_nic *efx);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
int (*reconfigure_port)(struct efx_nic *efx);
+ void (*prepare_enable_fc_tx)(struct efx_nic *efx);
int (*reconfigure_mac)(struct efx_nic *efx);
bool (*check_mac_fault)(struct efx_nic *efx);
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
@@ -1002,22 +1076,90 @@ struct efx_nic_type {
void (*resume_wol)(struct efx_nic *efx);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
+ void (*mcdi_request)(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len);
+ bool (*mcdi_poll_response)(struct efx_nic *efx);
+ void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
+ size_t pdu_offset, size_t pdu_len);
+ int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*irq_enable_master)(struct efx_nic *efx);
+ void (*irq_test_generate)(struct efx_nic *efx);
+ void (*irq_disable_non_ev)(struct efx_nic *efx);
+ irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+ irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+ int (*tx_probe)(struct efx_tx_queue *tx_queue);
+ void (*tx_init)(struct efx_tx_queue *tx_queue);
+ void (*tx_remove)(struct efx_tx_queue *tx_queue);
+ void (*tx_write)(struct efx_tx_queue *tx_queue);
+ void (*rx_push_indir_table)(struct efx_nic *efx);
+ int (*rx_probe)(struct efx_rx_queue *rx_queue);
+ void (*rx_init)(struct efx_rx_queue *rx_queue);
+ void (*rx_remove)(struct efx_rx_queue *rx_queue);
+ void (*rx_write)(struct efx_rx_queue *rx_queue);
+ void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ int (*ev_probe)(struct efx_channel *channel);
+ int (*ev_init)(struct efx_channel *channel);
+ void (*ev_fini)(struct efx_channel *channel);
+ void (*ev_remove)(struct efx_channel *channel);
+ int (*ev_process)(struct efx_channel *channel, int quota);
+ void (*ev_read_ack)(struct efx_channel *channel);
+ void (*ev_test_generate)(struct efx_channel *channel);
+ int (*filter_table_probe)(struct efx_nic *efx);
+ void (*filter_table_restore)(struct efx_nic *efx);
+ void (*filter_table_remove)(struct efx_nic *efx);
+ void (*filter_update_rx_scatter)(struct efx_nic *efx);
+ s32 (*filter_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+ int (*filter_remove_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+ int (*filter_get_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+ void (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_count_rx_used)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
+ s32 (*filter_get_rx_ids)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+ s32 (*filter_rfs_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+ bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+#ifdef CONFIG_SFC_MTD
+ int (*mtd_probe)(struct efx_nic *efx);
+ void (*mtd_rename)(struct efx_mtd_partition *part);
+ int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+ void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int revision;
- unsigned int mem_map_size;
unsigned int txd_ptr_tbl_base;
unsigned int rxd_ptr_tbl_base;
unsigned int buf_tbl_base;
unsigned int evq_ptr_tbl_base;
unsigned int evq_rptr_tbl_base;
u64 max_dma_mask;
- unsigned int rx_buffer_hash_size;
+ unsigned int rx_prefix_size;
+ unsigned int rx_hash_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
+ bool always_rx_scatter;
unsigned int max_interrupt_mode;
- unsigned int phys_addr_channels;
unsigned int timer_period_max;
netdev_features_t offload_features;
+ int mcdi_max_ver;
+ unsigned int max_rx_ip_filters;
};
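+
+/* Taken together, the mcdi_* hooks above form a simple polled
+ * transport. Minimal caller sketch (illustrative; the real state
+ * machine in mcdi.c also handles timeouts, events and MC reboots):
+ *
+ *	efx->type->mcdi_request(efx, hdr, hdr_len, sdu, sdu_len);
+ *	while (!efx->type->mcdi_poll_response(efx))
+ *		cpu_relax();
+ *	efx->type->mcdi_read_response(efx, pdu, pdu_offset, pdu_len);
+ *	rc = efx->type->mcdi_poll_reboot(efx);
+ */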
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 56ed3bc71e00..e7dbd2dd202e 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,295 +19,22 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
*
- * Configurable values
- *
- **************************************************************************
- */
-
-/* This is set to 16 for a good reason. In summary, if larger than
- * 16, the descriptor cache holds more than a default socket
- * buffer's worth of packets (for UDP we can only have at most one
- * socket buffer's worth outstanding). This combined with the fact
- * that we only get 1 TX event per descriptor cache means the NIC
- * goes idle.
- */
-#define TX_DC_ENTRIES 16
-#define TX_DC_ENTRIES_ORDER 1
-
-#define RX_DC_ENTRIES 64
-#define RX_DC_ENTRIES_ORDER 3
-
-/* If EFX_MAX_INT_ERRORS internal errors occur within
- * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
- * disable it.
- */
-#define EFX_INT_ERROR_EXPIRE 3600
-#define EFX_MAX_INT_ERRORS 5
-
-/* Depth of RX flush request fifo */
-#define EFX_RX_FLUSH_COUNT 4
-
-/* Driver generated events */
-#define _EFX_CHANNEL_MAGIC_TEST 0x000101
-#define _EFX_CHANNEL_MAGIC_FILL 0x000102
-#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
-#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
-
-#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
-#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
-
-#define EFX_CHANNEL_MAGIC_TEST(_channel) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
-#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
- (_tx_queue)->queue)
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic);
-
-/**************************************************************************
- *
- * Solarstorm hardware access
- *
- **************************************************************************/
-
-static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
- unsigned int index)
-{
- efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
- value, index);
-}
-
-/* Read the current event from the event queue */
-static inline efx_qword_t *efx_event(struct efx_channel *channel,
- unsigned int index)
-{
- return ((efx_qword_t *) (channel->eventq.addr)) +
- (index & channel->eventq_mask);
-}
-
-/* See if an event is present
- *
- * We check both the high and low dword of the event for all ones. We
- * wrote all ones when we cleared the event, and no valid event can
- * have all ones in either its high or low dwords. This approach is
- * robust against reordering.
- *
- * Note that using a single 64-bit comparison is incorrect; even
- * though the CPU read will be atomic, the DMA write may not be.
- */
-static inline int efx_event_present(efx_qword_t *event)
-{
- return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
- EFX_DWORD_IS_ALL_ONES(event->dword[1]));
-}
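The split check matters because the DMA write of an event need not be atomic with respect to the CPU's read: a half-written event still holds all ones in one dword and is therefore treated as absent until both halves have landed. A hedged illustration of the same test outside the driver:

#include <stdbool.h>
#include <stdint.h>

/* Present iff neither 32-bit half still holds the all-ones "cleared"
 * pattern; a torn DMA write can never look valid in both halves. */
static bool event_present(const uint32_t dword[2])
{
	return dword[0] != 0xffffffff && dword[1] != 0xffffffff;
}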
-
-static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
- const efx_oword_t *mask)
-{
- return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
- ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
-}
-
-int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs)
-{
- unsigned address = 0, i, j;
- efx_oword_t mask, imask, original, reg, buf;
-
- for (i = 0; i < n_regs; ++i) {
- address = regs[i].address;
- mask = imask = regs[i].mask;
- EFX_INVERT_OWORD(imask);
-
- efx_reado(efx, &original, address);
-
- /* bit sweep on and off */
- for (j = 0; j < 128; j++) {
- if (!EFX_EXTRACT_OWORD32(mask, j, j))
- continue;
-
- /* Test this testable bit can be set in isolation */
- EFX_AND_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 1);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
-
- /* Test this testable bit can be cleared in isolation */
- EFX_OR_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 0);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
- }
-
- efx_writeo(efx, &original, address);
- }
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev,
- "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
- " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
- EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
- return -EIO;
-}
-
-/**************************************************************************
- *
- * Special buffer handling
- * Special buffers are used for event queues and the TX and RX
- * descriptor rings.
- *
- *************************************************************************/
-
-/*
- * Initialise a special buffer
- *
- * This will define a buffer (previously allocated via
- * efx_alloc_special_buffer()) in the buffer table, allowing
- * it to be used for event queues, descriptor rings etc.
- */
-static void
-efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_qword_t buf_desc;
- unsigned int index;
- dma_addr_t dma_addr;
- int i;
-
- EFX_BUG_ON_PARANOID(!buffer->addr);
-
- /* Write buffer descriptors to NIC */
- for (i = 0; i < buffer->entries; i++) {
- index = buffer->index + i;
- dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
- netif_dbg(efx, probe, efx->net_dev,
- "mapping special buffer %d at %llx\n",
- index, (unsigned long long)dma_addr);
- EFX_POPULATE_QWORD_3(buf_desc,
- FRF_AZ_BUF_ADR_REGION, 0,
- FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
- FRF_AZ_BUF_OWNER_ID_FBUF, 0);
- efx_write_buf_tbl(efx, &buf_desc, index);
- }
-}
-
-/* Unmaps a buffer and clears the buffer table entries */
-static void
-efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_oword_t buf_tbl_upd;
- unsigned int start = buffer->index;
- unsigned int end = (buffer->index + buffer->entries - 1);
-
- if (!buffer->entries)
- return;
-
- netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
- buffer->index, buffer->index + buffer->entries - 1);
-
- EFX_POPULATE_OWORD_4(buf_tbl_upd,
- FRF_AZ_BUF_UPD_CMD, 0,
- FRF_AZ_BUF_CLR_CMD, 1,
- FRF_AZ_BUF_CLR_END_ID, end,
- FRF_AZ_BUF_CLR_START_ID, start);
- efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
-}
-
-/*
- * Allocate a new special buffer
- *
- * This allocates memory for a new buffer, clears it and allocates a
- * new buffer ID range. It does not write into the buffer table.
- *
- * This call will allocate 4KB buffers, since 8KB buffers can't be
- * used for event queues and descriptor rings.
- */
-static int efx_alloc_special_buffer(struct efx_nic *efx,
- struct efx_special_buffer *buffer,
- unsigned int len)
-{
- len = ALIGN(len, EFX_BUF_SIZE);
-
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, GFP_KERNEL);
- if (!buffer->addr)
- return -ENOMEM;
- buffer->len = len;
- buffer->entries = len / EFX_BUF_SIZE;
- BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
-
- /* Select new buffer ID */
- buffer->index = efx->next_buffer_table;
- efx->next_buffer_table += buffer->entries;
-#ifdef CONFIG_SFC_SRIOV
- BUG_ON(efx_sriov_enabled(efx) &&
- efx->vf_buftbl_base < efx->next_buffer_table);
-#endif
-
- netif_dbg(efx, probe, efx->net_dev,
- "allocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- return 0;
-}
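Buffer-table IDs are handed out by a simple bump allocator: each allocation takes the next free index and advances the high-water mark, which is why the SR-IOV region must sit above everything the driver itself will allocate (the BUG_ON above enforces exactly that). As a sketch of the allocation step in isolation:

/* Bump allocator for buffer-table IDs; *next tracks the first free ID. */
static unsigned int alloc_buftbl_ids(unsigned int *next, unsigned int entries)
{
	unsigned int index = *next;

	*next += entries;
	return index;
}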
-
-static void
-efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- if (!buffer->addr)
- return;
-
- netif_dbg(efx, hw, efx->net_dev,
- "deallocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, buffer->len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
- buffer->dma_addr);
- buffer->addr = NULL;
- buffer->entries = 0;
-}
-
-/**************************************************************************
- *
* Generic buffer handling
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len)
+ unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr,
- GFP_ATOMIC | __GFP_ZERO);
+ buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -323,1057 +50,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
}
}
-/**************************************************************************
- *
- * TX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified transmit descriptor in the TX
- * descriptor queue belonging to the specified channel.
- */
-static inline efx_qword_t *
-efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
-}
-
-/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
-static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
-{
- unsigned write_ptr;
- efx_dword_t reg;
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(tx_queue->efx, &reg,
- FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
-}
-
-/* Write pointer and first descriptor for TX descriptor ring */
-static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
- const efx_qword_t *txd)
-{
- unsigned write_ptr;
- efx_oword_t reg;
-
- BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
- BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
- FRF_AZ_TX_DESC_WPTR, write_ptr);
- reg.qword[0] = *txd;
- efx_writeo_page(tx_queue->efx, &reg,
- FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
-}
-
-static inline bool
-efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
-{
- unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
- if (empty_read_count == 0)
- return false;
-
- tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
- && tx_queue->write_count - write_count == 1;
-}
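The XOR with EFX_EMPTY_COUNT_VALID masked off is just an equality test on the counter bits: push a descriptor only if the queue was last observed empty at exactly the previous write position and exactly one descriptor has been added since. A simplified model of the predicate, assuming 32-bit counters with the valid flag in the top bit (an assumption for illustration only):

#include <stdbool.h>
#include <stdint.h>

#define EMPTY_COUNT_VALID (1u << 31)

static bool may_push(uint32_t empty_read_count, uint32_t old_write,
		     uint32_t new_write)
{
	if (empty_read_count == 0)	/* queue never observed empty */
		return false;

	/* empty at old_write, and exactly one descriptor added since */
	return ((empty_read_count ^ old_write) & ~EMPTY_COUNT_VALID) == 0 &&
	       new_write - old_write == 1;
}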
-
-/* For each entry inserted into the software descriptor ring, create a
- * descriptor in the hardware TX descriptor ring (in host memory), and
- * write a doorbell.
- */
-void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
-{
-
- struct efx_tx_buffer *buffer;
- efx_qword_t *txd;
- unsigned write_ptr;
- unsigned old_write_count = tx_queue->write_count;
-
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
-
- do {
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[write_ptr];
- txd = efx_tx_desc(tx_queue, write_ptr);
- ++tx_queue->write_count;
-
- /* Create TX descriptor ring entry */
- BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
- EFX_POPULATE_QWORD_4(*txd,
- FSF_AZ_TX_KER_CONT,
- buffer->flags & EFX_TX_BUF_CONT,
- FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
- FSF_AZ_TX_KER_BUF_REGION, 0,
- FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
- } while (tx_queue->write_count != tx_queue->insert_count);
-
- wmb(); /* Ensure descriptors are written before they are fetched */
-
- if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
- txd = efx_tx_desc(tx_queue,
- old_write_count & tx_queue->ptr_mask);
- efx_push_tx_desc(tx_queue, txd);
- ++tx_queue->pushes;
- } else {
- efx_notify_tx_desc(tx_queue);
- }
-}
-
-/* Allocate hardware resources for a TX queue */
-int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned entries;
-
- entries = tx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &tx_queue->txd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t reg;
-
- /* Pin TX descriptor ring */
- efx_init_special_buffer(efx, &tx_queue->txd);
-
- /* Push TX descriptor ring to card */
- EFX_POPULATE_OWORD_10(reg,
- FRF_AZ_TX_DESCQ_EN, 1,
- FRF_AZ_TX_ISCSI_DDIG_EN, 0,
- FRF_AZ_TX_ISCSI_HDIG_EN, 0,
- FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
- FRF_AZ_TX_DESCQ_EVQ_ID,
- tx_queue->channel->channel,
- FRF_AZ_TX_DESCQ_OWNER_ID, 0,
- FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
- FRF_AZ_TX_DESCQ_SIZE,
- __ffs(tx_queue->txd.entries),
- FRF_AZ_TX_DESCQ_TYPE, 0,
- FRF_BZ_TX_NON_IP_DROP_DIS, 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
- !csum);
- }
-
- efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- /* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
-
- efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
- __clear_bit_le(tx_queue->queue, &reg);
- else
- __set_bit_le(tx_queue->queue, &reg);
- efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_1(reg,
- FRF_BZ_TX_PACE,
- (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
- FFE_BZ_TX_PACE_OFF :
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
- tx_queue->queue);
- }
-}
-
-static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_flush_descq;
-
- WARN_ON(atomic_read(&tx_queue->flush_outstanding));
- atomic_set(&tx_queue->flush_outstanding, 1);
-
- EFX_POPULATE_OWORD_2(tx_flush_descq,
- FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
- efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_desc_ptr;
-
- /* Remove TX descriptor ring from card */
- EFX_ZERO_OWORD(tx_desc_ptr);
- efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- /* Unpin TX descriptor ring */
- efx_fini_special_buffer(efx, &tx_queue->txd);
-}
-
-/* Free buffers backing TX queue */
-void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
-{
- efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
-}
-
-/**************************************************************************
- *
- * RX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified descriptor in the RX descriptor queue */
-static inline efx_qword_t *
-efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
-}
-
-/* This creates an entry in the RX descriptor queue */
-static inline void
-efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_rx_buffer *rx_buf;
- efx_qword_t *rxd;
-
- rxd = efx_rx_desc(rx_queue, index);
- rx_buf = efx_rx_buffer(rx_queue, index);
- EFX_POPULATE_QWORD_3(*rxd,
- FSF_AZ_RX_KER_BUF_SIZE,
- rx_buf->len -
- rx_queue->efx->type->rx_buffer_padding,
- FSF_AZ_RX_KER_BUF_REGION, 0,
- FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
-}
-
-/* This writes to the RX_DESC_WPTR register for the specified receive
- * descriptor ring.
- */
-void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_dword_t reg;
- unsigned write_ptr;
-
- while (rx_queue->notified_count != rx_queue->added_count) {
- efx_build_rx_desc(
- rx_queue,
- rx_queue->notified_count & rx_queue->ptr_mask);
- ++rx_queue->notified_count;
- }
-
- wmb();
- write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
- efx_rx_queue_index(rx_queue));
-}
-
-int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned entries;
-
- entries = rx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &rx_queue->rxd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
- bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
- bool iscsi_digest_en = is_b0;
- bool jumbo_en;
-
- /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
- * DMA to continue after a PCIe page boundary (and scattering
- * is not possible). In Falcon B0 and Siena, it enables
- * scatter.
- */
- jumbo_en = !is_b0 || efx->rx_scatter;
-
- netif_dbg(efx, hw, efx->net_dev,
- "RX queue %d ring in special buffers %d-%d\n",
- efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
- rx_queue->rxd.index + rx_queue->rxd.entries - 1);
-
- rx_queue->scatter_n = 0;
-
- /* Pin RX descriptor ring */
- efx_init_special_buffer(efx, &rx_queue->rxd);
-
- /* Push RX descriptor ring to card */
- EFX_POPULATE_OWORD_10(rx_desc_ptr,
- FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
- FRF_AZ_RX_DESCQ_EVQ_ID,
- efx_rx_queue_channel(rx_queue)->channel,
- FRF_AZ_RX_DESCQ_OWNER_ID, 0,
- FRF_AZ_RX_DESCQ_LABEL,
- efx_rx_queue_index(rx_queue),
- FRF_AZ_RX_DESCQ_SIZE,
- __ffs(rx_queue->rxd.entries),
- FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
- FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
- FRF_AZ_RX_DESCQ_EN, 1);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-}
-
-static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_oword_t rx_flush_descq;
-
- EFX_POPULATE_OWORD_2(rx_flush_descq,
- FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_RX_FLUSH_DESCQ,
- efx_rx_queue_index(rx_queue));
- efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
-
- /* Remove RX descriptor ring from card */
- EFX_ZERO_OWORD(rx_desc_ptr);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-
- /* Unpin RX descriptor ring */
- efx_fini_special_buffer(efx, &rx_queue->rxd);
-}
-
-/* Free buffers backing RX queue */
-void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
-{
- efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
-}
-
-/**************************************************************************
- *
- * Flush handling
- *
- **************************************************************************/
-
-/* The waiter in efx_nic_flush_queues() must be woken when all flushes have
- * completed, or when more RX flushes can be kicked off.
- */
-static bool efx_flush_wake(struct efx_nic *efx)
-{
- /* Ensure that all updates are visible to efx_nic_flush_queues() */
- smp_mb();
-
- return (atomic_read(&efx->drain_pending) == 0 ||
- (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
- && atomic_read(&efx->rxq_flush_pending) > 0));
-}
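So the waiter is released either when every drain has completed, or when there is headroom to kick another RX flush: fewer than EFX_RX_FLUSH_COUNT in flight and at least one still pending. The predicate reduces to:

#include <stdbool.h>

#define RX_FLUSH_COUNT 4	/* hardware limit on concurrent RX flushes */

static bool flush_wake(int drain_pending, int rxq_outstanding, int rxq_pending)
{
	return drain_pending == 0 ||
	       (rxq_outstanding < RX_FLUSH_COUNT && rxq_pending > 0);
}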
-
-static bool efx_check_tx_flush_complete(struct efx_nic *efx)
-{
- bool i = true;
- efx_oword_t txd_ptr_tbl;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_reado_table(efx, &txd_ptr_tbl,
- FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
- if (EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_FLUSH) ||
- EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_EN)) {
- netif_dbg(efx, hw, efx->net_dev,
- "flush did not complete on TXQ %d\n",
- tx_queue->queue);
- i = false;
- } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
- 1, 0)) {
- /* The flush is complete, but we didn't
- * receive a flush completion event
- */
- netif_dbg(efx, hw, efx->net_dev,
- "flush complete on TXQ %d, so drain "
- "the queue\n", tx_queue->queue);
- /* Don't need to increment drain_pending as it
- * has already been incremented for the queues
- * which did not drain
- */
- efx_magic_event(channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(
- tx_queue));
- }
- }
- }
-
- return i;
-}
-
-/* Flush all the transmit queues, and continue flushing receive queues until
- * they're all flushed. Wait for the DRAIN events to be received so that there
- * are no more RX and TX events left on any channel. */
-int efx_nic_flush_queues(struct efx_nic *efx)
-{
- unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int rc = 0;
-
- efx->type->prepare_flush(efx);
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- efx_flush_tx_queue(tx_queue);
- }
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- }
- }
-
- while (timeout && atomic_read(&efx->drain_pending) > 0) {
- /* If SRIOV is enabled, then offload receive queue flushing to
- * the firmware (though we will still have to poll for
- * completion). If that fails, fall back to the old scheme.
- */
- if (efx_sriov_enabled(efx)) {
- rc = efx_mcdi_flush_rxqs(efx);
- if (!rc)
- goto wait;
- }
-
- /* The hardware supports four concurrent rx flushes, each of
- * which may need to be retried if there is an outstanding
- * descriptor fetch
- */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (atomic_read(&efx->rxq_flush_outstanding) >=
- EFX_RX_FLUSH_COUNT)
- break;
-
- if (rx_queue->flush_pending) {
- rx_queue->flush_pending = false;
- atomic_dec(&efx->rxq_flush_pending);
- atomic_inc(&efx->rxq_flush_outstanding);
- efx_flush_rx_queue(rx_queue);
- }
- }
- }
-
- wait:
- timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
- timeout);
- }
-
- if (atomic_read(&efx->drain_pending) &&
- !efx_check_tx_flush_complete(efx)) {
- netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
- "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
- atomic_read(&efx->rxq_flush_outstanding),
- atomic_read(&efx->rxq_flush_pending));
- rc = -ETIMEDOUT;
-
- atomic_set(&efx->drain_pending, 0);
- atomic_set(&efx->rxq_flush_pending, 0);
- atomic_set(&efx->rxq_flush_outstanding, 0);
- }
-
- efx->type->finish_flush(efx);
-
- return rc;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- * Event queues are processed by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Update a channel's event queue's read pointer (RPTR) register
- *
- * This writes the EVQ_RPTR_REG register for the specified channel's
- * event queue.
- */
-void efx_nic_eventq_read_ack(struct efx_channel *channel)
-{
- efx_dword_t reg;
- struct efx_nic *efx = channel->efx;
-
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
- channel->eventq_read_ptr & channel->eventq_mask);
-
- /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
- * of 4 bytes, but it is really 16 bytes just like later revisions.
- */
- efx_writed(efx, &reg,
- efx->type->evq_rptr_tbl_base +
- FR_BZ_EVQ_RPTR_STEP * channel->channel);
-}
-
-/* Use HW to insert a SW defined event */
-void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event)
-{
- efx_oword_t drv_ev_reg;
-
- BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
- FRF_AZ_DRV_EV_DATA_WIDTH != 64);
- drv_ev_reg.u32[0] = event->u32[0];
- drv_ev_reg.u32[1] = event->u32[1];
- drv_ev_reg.u32[2] = 0;
- drv_ev_reg.u32[3] = 0;
- EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
- efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
-}
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic)
-{
- efx_qword_t event;
-
- EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
- FSE_AZ_EV_CODE_DRV_GEN_EV,
- FSF_AZ_DRV_GEN_EV_MAGIC, magic);
- efx_generate_event(channel->efx, channel->channel, &event);
-}
-
-/* Handle a transmit completion event
- *
- * The NIC batches TX completion events; the message we receive is of
- * the form "complete all TX events up to this index".
- */
-static int
-efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
-{
- unsigned int tx_ev_desc_ptr;
- unsigned int tx_ev_q_label;
- struct efx_tx_queue *tx_queue;
- struct efx_nic *efx = channel->efx;
- int tx_packets = 0;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return 0;
-
- if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
- tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- tx_queue->ptr_mask);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
- /* Rewrite the FIFO write pointer */
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
-
- netif_tx_lock(efx->net_dev);
- efx_notify_tx_desc(tx_queue);
- netif_tx_unlock(efx->net_dev);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
- EFX_WORKAROUND_10727(efx)) {
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else {
- netif_err(efx, tx_err, efx->net_dev,
- "channel %d unexpected TX event "
- EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
- }
-
- return tx_packets;
-}
-
-/* Detect errors reported when the rx_ev_pkt_ok bit is clear. */
-static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
- const efx_qword_t *event)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
- bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
- bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
-
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
- rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
- rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
- rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
- rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
- rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
- rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
- 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
- rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
-
- /* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
- rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
- rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
-
- /* Count errors that are not in MAC stats. Ignore expected
- * checksum errors during self-test. */
- if (rx_ev_frm_trunc)
- ++channel->n_rx_frm_trunc;
- else if (rx_ev_tobe_disc)
- ++channel->n_rx_tobe_disc;
- else if (!efx->loopback_selftest) {
- if (rx_ev_ip_hdr_chksum_err)
- ++channel->n_rx_ip_hdr_chksum_err;
- else if (rx_ev_tcp_udp_chksum_err)
- ++channel->n_rx_tcp_udp_chksum_err;
- }
-
- /* TOBE_DISC is expected on unicast mismatches; don't print out an
- * error message. FRM_TRUNC indicates RXDP dropped the packet due
- * to a FIFO overflow.
- */
-#ifdef DEBUG
- if (rx_ev_other_err && net_ratelimit()) {
- netif_dbg(efx, rx_err, efx->net_dev,
- " RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
- rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
- rx_ev_ip_hdr_chksum_err ?
- " [IP_HDR_CHKSUM_ERR]" : "",
- rx_ev_tcp_udp_chksum_err ?
- " [TCP_UDP_CHKSUM_ERR]" : "",
- rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
- rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
- rx_ev_drib_nib ? " [DRIB_NIB]" : "",
- rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
- rx_ev_pause_frm ? " [PAUSE]" : "");
- }
-#endif
-
- /* The frame must be discarded if any of these are true. */
- return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
- rx_ev_tobe_disc | rx_ev_pause_frm) ?
- EFX_RX_PKT_DISCARD : 0;
-}
-
-/* Handle receive events that are not in-order. Return true if this
- * can be handled as a partial packet discard, false if it's more
- * serious.
- */
-static bool
-efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- unsigned expected, dropped;
-
- if (rx_queue->scatter_n &&
- index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
- rx_queue->ptr_mask)) {
- ++channel->n_rx_nodesc_trunc;
- return true;
- }
-
- expected = rx_queue->removed_count & rx_queue->ptr_mask;
- dropped = (index - expected) & rx_queue->ptr_mask;
- netif_info(efx, rx_err, efx->net_dev,
- "dropped %d events (index=%d expected=%d)\n",
- dropped, index, expected);
-
- efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
- RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
- return false;
-}
-
-/* Handle a packet received event
- *
- * The NIC gives a "discard" flag if it's a unicast packet with the
- * wrong destination address
- * Also "is multicast" and "matches multicast filter" flags can be used to
- * discard non-matching multicast packets.
- */
-static void
-efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
-{
- unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
- unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned expected_ptr;
- bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
- u16 flags;
- struct efx_rx_queue *rx_queue;
- struct efx_nic *efx = channel->efx;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return;
-
- rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
- rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
- channel->channel);
-
- rx_queue = efx_channel_get_rx_queue(channel);
-
- rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
- rx_queue->ptr_mask);
-
- /* Check for partial drops and other errors */
- if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
- unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
- if (rx_ev_desc_ptr != expected_ptr &&
- !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
- return;
-
- /* Discard all pending fragments */
- if (rx_queue->scatter_n) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
- }
-
- /* Return if there is no new fragment */
- if (rx_ev_desc_ptr != expected_ptr)
- return;
-
- /* Discard new fragment if not SOP */
- if (!rx_ev_sop) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- 1, 0, EFX_RX_PKT_DISCARD);
- ++rx_queue->removed_count;
- return;
- }
- }
-
- ++rx_queue->scatter_n;
- if (rx_ev_cont)
- return;
-
- rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
- rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
-
- if (likely(rx_ev_pkt_ok)) {
- /* If packet is marked as OK then we can rely on the
- * hardware checksum and classification.
- */
- flags = 0;
- switch (rx_ev_hdr_type) {
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
- flags |= EFX_RX_PKT_TCP;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
- flags |= EFX_RX_PKT_CSUMMED;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
- case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
- break;
- }
- } else {
- flags = efx_handle_rx_not_ok(rx_queue, event);
- }
-
- /* Detect multicast packets that didn't match the filter */
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- if (rx_ev_mcast_pkt) {
- unsigned int rx_ev_mcast_hash_match =
- EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
-
- if (unlikely(!rx_ev_mcast_hash_match)) {
- ++channel->n_rx_mcast_mismatch;
- flags |= EFX_RX_PKT_DISCARD;
- }
- }
-
- channel->irq_mod_score += 2;
-
- /* Handle received packet */
- efx_rx_packet(rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, rx_ev_byte_cnt, flags);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
-}
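Scatter accounting is the subtle part of the handler above: every event for the queue covers one more descriptor, but only the final event (with JUMBO_CONT clear) hands the whole run to the RX path as a single packet. A stripped-down model of that bookkeeping, with the error paths omitted:

#include <stdbool.h>

/* One call per RX event for this queue; cont is the JUMBO_CONT flag. */
static void rx_event(unsigned int *scatter_n, unsigned int *removed_count,
		     bool cont)
{
	++*scatter_n;		/* this event covers one more descriptor */
	if (cont)
		return;		/* more fragments of the same packet follow */

	/* final fragment: deliver all scatter_n fragments as one packet,
	 * e.g. efx_rx_packet(..., *scatter_n, ...) in the real driver */
	*removed_count += *scatter_n;
	*scatter_n = 0;
}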
-
-/* If this flush done event corresponds to a &struct efx_tx_queue, then
- * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
- * of all transmit completions.
- */
-static void
-efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_tx_queue *tx_queue;
- int qid;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
- tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
- qid % EFX_TXQ_TYPES);
- if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
- efx_magic_event(tx_queue->channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
- }
- }
-}
-
-/* If this flush done event corresponds to a &struct efx_rx_queue: if the flush
- * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
- * the RX queue back to the mask of RX queues in need of flushing.
- */
-static void
-efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- int qid;
- bool failed;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
- failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (qid >= efx->n_channels)
- return;
- channel = efx_get_channel(efx, qid);
- if (!efx_channel_has_rx_queue(channel))
- return;
- rx_queue = efx_channel_get_rx_queue(channel);
-
- if (failed) {
- netif_info(efx, hw, efx->net_dev,
- "RXQ %d flush retry\n", qid);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- } else {
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
- }
- atomic_dec(&efx->rxq_flush_outstanding);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_drain_event(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- WARN_ON(atomic_read(&efx->drain_pending) == 0);
- atomic_dec(&efx->drain_pending);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue =
- efx_channel_has_rx_queue(channel) ?
- efx_channel_get_rx_queue(channel) : NULL;
- unsigned magic, code;
-
- magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
- code = _EFX_CHANNEL_MAGIC_CODE(magic);
-
- if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
- channel->event_test_cpu = raw_smp_processor_id();
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
-		/* The queue must be empty, so we won't receive any rx
-		 * events and efx_process_channel() won't refill the
-		 * queue; refill it here. */
- efx_fast_push_rx_descriptors(rx_queue);
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
- rx_queue->enabled = false;
- efx_handle_drain_event(channel);
- } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
- efx_handle_drain_event(channel);
- } else {
- netif_dbg(efx, hw, efx->net_dev, "channel %d received "
- "generated event "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(*event));
- }
-}
-
-static void
-efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int ev_sub_code;
- unsigned int ev_sub_data;
-
- ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
- ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
-
- switch (ev_sub_code) {
- case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_tx_flush_done(efx, event);
- efx_sriov_tx_flush_done(efx, event);
- break;
- case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_rx_flush_done(efx, event);
- efx_sriov_rx_flush_done(efx, event);
- break;
- case FSE_AZ_EVQ_INIT_DONE_EV:
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d EVQ %d initialised\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_SRM_UPD_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d SRAM update done\n", channel->channel);
- break;
- case FSE_AZ_WAKE_UP_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RXQ %d wakeup event\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_TIMER_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RX queue %d timer expired\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AA_RX_RECOVER_EV:
- netif_err(efx, rx_err, efx->net_dev,
-			  "channel %d saw DRIVER RX_RESET event. "
- "Resetting.\n", channel->channel);
- atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx,
- EFX_WORKAROUND_6555(efx) ?
- RESET_TYPE_RX_RECOVERY :
- RESET_TYPE_DISABLE);
- break;
- case FSE_BZ_RX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, rx_err, efx->net_dev,
- "RX DMA Q %d reports descriptor fetch error."
- " RX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- case FSE_BZ_TX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, tx_err, efx->net_dev,
- "TX DMA Q %d reports descriptor fetch error."
- " TX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- default:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d unknown driver event code %d "
- "data %04x\n", channel->channel, ev_sub_code,
- ev_sub_data);
- break;
- }
-}
-
-int efx_nic_process_eventq(struct efx_channel *channel, int budget)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int read_ptr;
- efx_qword_t event, *p_event;
- int ev_code;
- int tx_packets = 0;
- int spent = 0;
-
- read_ptr = channel->eventq_read_ptr;
-
- for (;;) {
- p_event = efx_event(channel, read_ptr);
- event = *p_event;
-
- if (!efx_event_present(&event))
- /* End of events */
- break;
-
- netif_vdbg(channel->efx, intr, channel->efx->net_dev,
- "channel %d event is "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(event));
-
- /* Clear this event by marking it all ones */
- EFX_SET_QWORD(*p_event);
-
- ++read_ptr;
-
- ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
-
- switch (ev_code) {
- case FSE_AZ_EV_CODE_RX_EV:
- efx_handle_rx_event(channel, &event);
- if (++spent == budget)
- goto out;
- break;
- case FSE_AZ_EV_CODE_TX_EV:
- tx_packets += efx_handle_tx_event(channel, &event);
- if (tx_packets > efx->txq_entries) {
- spent = budget;
- goto out;
- }
- break;
- case FSE_AZ_EV_CODE_DRV_GEN_EV:
- efx_handle_generated_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_DRIVER_EV:
- efx_handle_driver_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_USER_EV:
- efx_sriov_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_MCDI_EV:
- efx_mcdi_process_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_GLOBAL_EV:
- if (efx->type->handle_global_event &&
- efx->type->handle_global_event(channel, &event))
- break;
- /* else fall through */
- default:
- netif_err(channel->efx, hw, channel->efx->net_dev,
- "channel %d unknown event type %d (data "
- EFX_QWORD_FMT ")\n", channel->channel,
- ev_code, EFX_QWORD_VAL(event));
- }
- }
-
-out:
- channel->eventq_read_ptr = read_ptr;
- return spent;
-}
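The loop structure generalises to any budget-limited event ring: consume until the budget is spent, clear each slot by writing the all-ones pattern back, and commit the read pointer once at the end. A minimal single-threaded sketch with dispatch elided; note the driver charges mainly RX work against the budget, whereas this simplification charges every event:

#include <stdint.h>

static int process_events(uint32_t (*ring)[2], unsigned int mask,
			  unsigned int *read_ptr, int budget)
{
	int spent = 0;

	while (spent < budget) {
		uint32_t *ev = ring[*read_ptr & mask];

		/* check each 32-bit half separately, as described above */
		if (ev[0] == 0xffffffff || ev[1] == 0xffffffff)
			break;			/* end of events */

		/* ... dispatch on the event code here ... */

		ev[0] = ev[1] = 0xffffffff;	/* clear by writing all ones */
		++*read_ptr;
		++spent;
	}
	return spent;
}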
-
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
@@ -1382,323 +58,18 @@ bool efx_nic_event_present(struct efx_channel *channel)
return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
-/* Allocate buffer table entries for event queue */
-int efx_nic_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned entries;
-
- entries = channel->eventq_mask + 1;
- return efx_alloc_special_buffer(efx, &channel->eventq,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d event queue in special buffers %d-%d\n",
- channel->channel, channel->eventq.index,
- channel->eventq.index + channel->eventq.entries - 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- EFX_POPULATE_OWORD_3(reg,
- FRF_CZ_TIMER_Q_EN, 1,
- FRF_CZ_HOST_NOTIFY_MODE, 0,
- FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
- }
-
- /* Pin event queue buffer */
- efx_init_special_buffer(efx, &channel->eventq);
-
- /* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.addr, 0xff, channel->eventq.len);
-
- /* Push event queue to card */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AZ_EVQ_EN, 1,
- FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
- FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
-
- efx->type->push_irq_moderation(channel);
-}
-
-void efx_nic_fini_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- /* Remove event queue from card */
- EFX_ZERO_OWORD(reg);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
-
- /* Unpin event queue */
- efx_fini_special_buffer(efx, &channel->eventq);
-}
-
-/* Free buffers backing event queue */
-void efx_nic_remove_eventq(struct efx_channel *channel)
-{
- efx_free_special_buffer(channel->efx, &channel->eventq);
-}
-
-
void efx_nic_event_test_start(struct efx_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
- efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+ channel->efx->type->ev_test_generate(channel);
}
-void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
-{
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_FILL(rx_queue));
-}
-
-/**************************************************************************
- *
- * Hardware interrupts
- * The hardware interrupt handler does very little work; all the event
- * queue processing is carried out by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Enable/disable/generate interrupts */
-static inline void efx_nic_interrupts(struct efx_nic *efx,
- bool enabled, bool force)
-{
- efx_oword_t int_en_reg_ker;
-
- EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
- FRF_AZ_KER_INT_KER, force,
- FRF_AZ_DRV_INT_EN_KER, enabled);
- efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
-}
-
-void efx_nic_enable_interrupts(struct efx_nic *efx)
-{
- EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
- wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
-
- efx_nic_interrupts(efx, true, false);
-}
-
-void efx_nic_disable_interrupts(struct efx_nic *efx)
-{
- /* Disable interrupts */
- efx_nic_interrupts(efx, false, false);
-}
-
-/* Generate a test interrupt
- * Interrupt must already have been enabled, otherwise nasty things
- * may happen.
- */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
- efx_nic_interrupts(efx, true, true);
-}
-
-/* Process a fatal interrupt
- * Disable bus mastering ASAP and schedule a reset
- */
-irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t *int_ker = efx->irq_status.addr;
- efx_oword_t fatal_intr;
- int error, mem_perr;
-
- efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
- error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
-
- netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
- EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
- EFX_OWORD_VAL(fatal_intr),
- error ? "disabling bus mastering" : "no recognised error");
-
- /* If this is a memory parity error dump which blocks are offending */
- mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
- EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
- if (mem_perr) {
- efx_oword_t reg;
- efx_reado(efx, &reg, FR_AZ_MEM_STAT);
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
- EFX_OWORD_VAL(reg));
- }
-
- /* Disable both devices */
- pci_clear_master(efx->pci_dev);
- if (efx_nic_is_dual_func(efx))
- pci_clear_master(nic_data->pci_dev2);
- efx_nic_disable_interrupts(efx);
-
- /* Count errors and reset or disable the NIC accordingly */
- if (efx->int_error_count == 0 ||
- time_after(jiffies, efx->int_error_expire)) {
- efx->int_error_count = 0;
- efx->int_error_expire =
- jiffies + EFX_INT_ERROR_EXPIRE * HZ;
- }
- if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR - reset scheduled\n");
- efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
- } else {
- netif_err(efx, hw, efx->net_dev,
-			  "SYSTEM ERROR - max number of errors seen. "
-			  "NIC will be disabled\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- }
-
- return IRQ_HANDLED;
-}
-
-/* Handle a legacy interrupt
- * Acknowledges the interrupt and schedules event queue processing.
- */
-static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
-{
- struct efx_nic *efx = dev_id;
- efx_oword_t *int_ker = efx->irq_status.addr;
- irqreturn_t result = IRQ_NONE;
- struct efx_channel *channel;
- efx_dword_t reg;
- u32 queues;
- int syserr;
-
- /* Could this be ours? If interrupts are disabled then the
- * channel state may not be valid.
- */
- if (!efx->legacy_irq_enabled)
- return result;
-
- /* Read the ISR which also ACKs the interrupts */
- efx_readd(efx, &reg, FR_BZ_INT_ISR0);
- queues = EFX_EXTRACT_DWORD(reg, 0, 31);
-
- /* Legacy interrupts are disabled too late by the EEH kernel
- * code. Disable them earlier.
- * If an EEH error occurred, the read will have returned all ones.
- */
- if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
- !efx->eeh_disabled_legacy_irq) {
- disable_irq_nosync(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = true;
- }
-
- /* Handle non-event-queue sources */
- if (queues & (1U << efx->irq_level)) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- if (queues != 0) {
- if (EFX_WORKAROUND_15783(efx))
- efx->irq_zero_count = 0;
-
- /* Schedule processing of any interrupting queues */
- efx_for_each_channel(channel, efx) {
- if (queues & 1)
- efx_schedule_channel_irq(channel);
- queues >>= 1;
- }
- result = IRQ_HANDLED;
-
- } else if (EFX_WORKAROUND_15783(efx)) {
- efx_qword_t *event;
-
- /* We can't return IRQ_HANDLED more than once on seeing ISR=0
- * because this might be a shared interrupt. */
- if (efx->irq_zero_count++ == 0)
- result = IRQ_HANDLED;
-
- /* Ensure we schedule or rearm all event queues */
- efx_for_each_channel(channel, efx) {
- event = efx_event(channel, channel->eventq_read_ptr);
- if (efx_event_present(event))
- efx_schedule_channel_irq(channel);
- else
- efx_nic_eventq_read_ack(channel);
- }
- }
-
- if (result == IRQ_HANDLED)
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-
- return result;
-}
-
-/* Handle an MSI interrupt
- *
- * Handle an MSI hardware interrupt. This routine schedules event
- * queue processing. No interrupt acknowledgement cycle is necessary.
- * Also, we never need to check that the interrupt is for us, since
- * MSI interrupts cannot be shared.
- */
-static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
-{
- struct efx_channel *channel = *(struct efx_channel **)dev_id;
- struct efx_nic *efx = channel->efx;
- efx_oword_t *int_ker = efx->irq_status.addr;
- int syserr;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
-
- /* Handle non-event-queue sources */
- if (channel->channel == efx->irq_level) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- /* Schedule processing of the channel */
- efx_schedule_channel_irq(channel);
-
- return IRQ_HANDLED;
-}
-
-
-/* Setup RSS indirection table.
- * This maps from the hash value of the packet to RXQ
- */
-void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- size_t i = 0;
- efx_dword_t dword;
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
-
- BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
- FR_BZ_RX_INDIRECTION_TBL_ROWS);
-
- for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
- EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- efx->rx_indir_table[i]);
- efx_writed(efx, &dword,
- FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP * i);
- }
+ efx->type->irq_test_generate(efx);
}
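The indirection-table writer removed above is the classic RSS pattern: the low bits of the packet hash index a small table whose entries name RX queues. A hedged sketch of filling such a table round-robin; the 128-row size matches this hardware family, but the constant below is local to the example:

#include <stddef.h>

#define INDIR_TBL_ROWS 128

/* Spread hash buckets evenly across the available RX queues. */
static void fill_indir_table(unsigned char table[INDIR_TBL_ROWS],
			     unsigned int n_rx_queues)
{
	size_t i;

	for (i = 0; i < INDIR_TBL_ROWS; i++)
		table[i] = i % n_rx_queues;
}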
/* Hook interrupt handler(s)
@@ -1711,13 +82,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
int rc;
if (!EFX_INT_MODE_USE_MSI(efx)) {
- irq_handler_t handler;
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- handler = efx_legacy_interrupt;
- else
- handler = falcon_legacy_interrupt_a1;
-
- rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+ rc = request_irq(efx->legacy_irq,
+ efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
@@ -1742,10 +108,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
/* Hook MSI or MSI-X interrupt */
n_irqs = 0;
efx_for_each_channel(channel, efx) {
- rc = request_irq(channel->irq, efx_msi_interrupt,
+ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */
- efx->channel_name[channel->channel],
- &efx->channel[channel->channel]);
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
@@ -1774,7 +140,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
@@ -1783,7 +149,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_oword_t reg;
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
@@ -1792,167 +157,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->channel[channel->channel]);
-
- /* ACK legacy interrupt */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- efx_reado(efx, &reg, FR_BZ_INT_ISR0);
- else
- falcon_irq_ack_a1(efx);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
/* Disable legacy interrupt */
if (efx->legacy_irq)
free_irq(efx->legacy_irq, efx);
}
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0 buftbl entries for channels
- * efx->vf_buftbl_base buftbl entries for SR-IOV
- * efx->rx_dc_base RX descriptor caches
- * efx->tx_dc_base TX descriptor caches
- */
-void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
-{
- unsigned vi_count, buftbl_min;
-
- /* Account for the buffer table entries backing the datapath channels
- * and the descriptor caches for those channels.
- */
- buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
- efx->n_channels * EFX_MAX_EVQ_SIZE)
- * sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
-
-#ifdef CONFIG_SFC_SRIOV
- if (efx_sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- efx->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
-				   "Reducing VF count from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
- }
- vi_count += efx->vf_count * efx_vf_size(efx);
- }
-#endif
-
- efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
- efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
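The buftbl_min arithmetic follows from two constants: each descriptor or event is one efx_qword_t (8 bytes) and each buffer-table entry maps one EFX_BUF_SIZE (4KB) buffer, so a ring of N entries costs N * 8 / 4096 buffer-table entries. A worked example with a hypothetical 4096-entry ring:

#include <stdio.h>

#define BUF_SIZE 4096u	/* bytes mapped per buffer-table entry */
#define QWORD    8u	/* bytes per descriptor or event */

int main(void)
{
	unsigned int dmaq_entries = 4096;	/* hypothetical ring size */
	unsigned int buftbl = dmaq_entries * QWORD / BUF_SIZE;

	printf("a %u-entry ring needs %u buffer-table entries\n",
	       dmaq_entries, buftbl);		/* prints 8 */
	return 0;
}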
-
-u32 efx_nic_fpga_ver(struct efx_nic *efx)
-{
- efx_oword_t altera_build;
- efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
- return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
-}
-
-void efx_nic_init_common(struct efx_nic *efx)
-{
- efx_oword_t temp;
-
- /* Set positions of descriptor caches in SRAM. */
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
-
- /* Set TX descriptor cache size. */
- BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
-
- /* Set RX descriptor cache size. Set low watermark to size-8, as
- * this allows most efficient prefetching.
- */
- BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
-
- /* Program INT_KER address */
- EFX_POPULATE_OWORD_2(temp,
- FRF_AZ_NORM_INT_VEC_DIS_KER,
- EFX_INT_MODE_USE_MSI(efx),
- FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
- efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Use an interrupt level unused by event queues */
- efx->irq_level = 0x1f;
- else
- /* Use a valid MSI-X vector */
- efx->irq_level = 0;
-
- /* Enable all the genuinely fatal interrupts. (They are still
- * masked by the overall interrupt mask, controlled by
- * falcon_interrupts()).
- *
- * Note: All other fatal interrupts are enabled
- */
- EFX_POPULATE_OWORD_3(temp,
- FRF_AZ_ILL_ADR_INT_KER_EN, 1,
- FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
- FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
- EFX_INVERT_OWORD(temp);
- efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
-
- efx_nic_push_rx_indir_table(efx);
-
- /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
- * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
- */
- efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
- /* Enable SW_EV to inherit in char driver - assume harmless here */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
- /* Prefetch threshold 2 => fetch when descriptor cache half empty */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
- /* Disable hardware watchdog which can misfire */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
- /* Squash TX of packets of 16 bytes or less */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
- efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_4(temp,
- /* Default values */
- FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
- FRF_BZ_TX_PACE_SB_AF, 0xb,
- FRF_BZ_TX_PACE_FB_BASE, 0,
- /* Allow large pace values in the
- * fast bin. */
- FRF_BZ_TX_PACE_BIN_TH,
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo(efx, &temp, FR_BZ_TX_PACE);
- }
-}
-
/* Register dump */
#define REGISTER_REVISION_A 1
@@ -2217,3 +428,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
}
}
}
+
+/**
+ * efx_nic_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct efx_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL. The names are copied
+ * starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
+ */
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names)
+{
+ size_t visible = 0;
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].name) {
+ if (names) {
+ strlcpy(names, desc[index].name,
+ ETH_GSTRING_LEN);
+ names += ETH_GSTRING_LEN;
+ }
+ ++visible;
+ }
+ }
+
+ return visible;
+}
+
+/**
+ * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
+ * layout. DMA widths of 0, 16, 32 and 64 are supported; where
+ * the width is specified as 0 the corresponding element of
+ * @stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics. The length
+ * of this array must be at least the number of set bits in the
+ * first @count bits of @mask.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ * directly stored to the corresponding elements of @stats
+ */
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate)
+{
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].dma_width) {
+ const void *addr = dma_buf + desc[index].offset;
+ u64 val;
+
+ switch (desc[index].dma_width) {
+ case 16:
+ val = le16_to_cpup((__le16 *)addr);
+ break;
+ case 32:
+ val = le32_to_cpup((__le32 *)addr);
+ break;
+ case 64:
+ val = le64_to_cpup((__le64 *)addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ break;
+ }
+
+ if (accumulate)
+ *stats += val;
+ else
+ *stats = val;
+ }
+
+ ++stats;
+ }
+}
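
A usage sketch for the two helpers above; the stat names, widths and offsets here are invented for illustration (the real tables are the *_stat_desc arrays added to siena.c and ef10.c by this series). Ethtool string registration would call efx_nic_describe_stats() with the same table and mask:

/* Hypothetical layout: a 64-bit byte counter at offset 0 and a
 * 32-bit packet counter at offset 8 of the statistics DMA buffer.
 */
static const struct efx_hw_stat_desc demo_stat_desc[] = {
	{ "demo_bytes",   64, 0 },
	{ "demo_packets", 32, 8 },
};
static const unsigned long demo_stat_mask[] = {
	[0 ... BITS_TO_LONGS(ARRAY_SIZE(demo_stat_desc)) - 1] = ~0UL,
};

static void demo_update_stats(u64 *stats, const void *dma_buf)
{
	/* Overwrite (accumulate=false) each enabled, named statistic */
	efx_nic_update_stats(demo_stat_desc, ARRAY_SIZE(demo_stat_desc),
			     demo_stat_mask, stats, dma_buf, false);
}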
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index d63c2991a751..fda29d39032f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -16,17 +16,13 @@
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
-#include "spi.h"
-
-/*
- * Falcon hardware control
- */
enum {
EFX_REV_FALCON_A0 = 0,
EFX_REV_FALCON_A1 = 1,
EFX_REV_FALCON_B0 = 2,
EFX_REV_SIENA_A0 = 3,
+ EFX_REV_HUNT_A0 = 4,
};
static inline int efx_nic_rev(struct efx_nic *efx)
@@ -34,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision;
}
-extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
+extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -42,6 +38,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
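
A minimal polling sketch built from the two inlines above (demo function only; cf. the event loop in efx_farch_ev_process()):

static efx_qword_t *demo_peek_event(struct efx_channel *channel,
				    unsigned int read_ptr)
{
	efx_qword_t *event = efx_event(channel, read_ptr);

	/* Present until the consumer overwrites it with all-ones */
	return efx_event_present(event) ? event : NULL;
}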
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell. This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless. Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
+ && tx_queue->write_count - write_count == 1;
+}
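
The intended call pattern, sketched below; the real logic lives in the tx_write implementations, and the comment placeholders stand for hardware-specific MMIO:

static void demo_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;

	/* ... fill descriptors, advancing tx_queue->write_count ... */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		/* write the first descriptor inline with the doorbell */
	} else {
		/* plain doorbell write */
	}
}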
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
@@ -59,9 +114,6 @@ enum {
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
-#define FALCON_GMAC_LOOPBACKS \
- (1 << LOOPBACK_GMAC)
-
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
@@ -105,13 +157,96 @@ struct falcon_board {
};
/**
+ * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @erase_command: Erase command (or 0 if sector erase not needed).
+ * @erase_size: Erase sector size (in bytes)
+ * Erase commands affect sectors with this size and alignment.
+ * This must be a power of two.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ */
+struct falcon_spi_device {
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ u8 erase_command;
+ unsigned int erase_size;
+ unsigned int block_size;
+};
+
+static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
+{
+ return spi->size != 0;
+}
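
A sketch of the address munging described in the kernel-doc above (demo helper only; the equivalent encoding is done in falcon.c):

static u8 demo_spi_munge_command(const struct falcon_spi_device *spi,
				 u8 command, unsigned int address)
{
	/* 9-bit-address parts carry address bit A8 in bit 3 of the
	 * command byte rather than in a second address byte.
	 */
	if (spi->munge_address)
		command |= (address & 0x100) >> 5;
	return command;
}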
+
+enum {
+ FALCON_STAT_tx_bytes,
+ FALCON_STAT_tx_packets,
+ FALCON_STAT_tx_pause,
+ FALCON_STAT_tx_control,
+ FALCON_STAT_tx_unicast,
+ FALCON_STAT_tx_multicast,
+ FALCON_STAT_tx_broadcast,
+ FALCON_STAT_tx_lt64,
+ FALCON_STAT_tx_64,
+ FALCON_STAT_tx_65_to_127,
+ FALCON_STAT_tx_128_to_255,
+ FALCON_STAT_tx_256_to_511,
+ FALCON_STAT_tx_512_to_1023,
+ FALCON_STAT_tx_1024_to_15xx,
+ FALCON_STAT_tx_15xx_to_jumbo,
+ FALCON_STAT_tx_gtjumbo,
+ FALCON_STAT_tx_non_tcpudp,
+ FALCON_STAT_tx_mac_src_error,
+ FALCON_STAT_tx_ip_src_error,
+ FALCON_STAT_rx_bytes,
+ FALCON_STAT_rx_good_bytes,
+ FALCON_STAT_rx_bad_bytes,
+ FALCON_STAT_rx_packets,
+ FALCON_STAT_rx_good,
+ FALCON_STAT_rx_bad,
+ FALCON_STAT_rx_pause,
+ FALCON_STAT_rx_control,
+ FALCON_STAT_rx_unicast,
+ FALCON_STAT_rx_multicast,
+ FALCON_STAT_rx_broadcast,
+ FALCON_STAT_rx_lt64,
+ FALCON_STAT_rx_64,
+ FALCON_STAT_rx_65_to_127,
+ FALCON_STAT_rx_128_to_255,
+ FALCON_STAT_rx_256_to_511,
+ FALCON_STAT_rx_512_to_1023,
+ FALCON_STAT_rx_1024_to_15xx,
+ FALCON_STAT_rx_15xx_to_jumbo,
+ FALCON_STAT_rx_gtjumbo,
+ FALCON_STAT_rx_bad_lt64,
+ FALCON_STAT_rx_bad_gtjumbo,
+ FALCON_STAT_rx_overflow,
+ FALCON_STAT_rx_symbol_error,
+ FALCON_STAT_rx_align_error,
+ FALCON_STAT_rx_length_error,
+ FALCON_STAT_rx_internal_error,
+ FALCON_STAT_rx_nodesc_drop_cnt,
+ FALCON_STAT_COUNT
+};
+
+/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
+ * @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
- * @stats_dma_done: Pointer to the flag which indicates DMA completion.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
@@ -121,12 +256,12 @@ struct falcon_board {
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
+ u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
- u32 *stats_dma_done;
- struct efx_spi_device spi_flash;
- struct efx_spi_device spi_eeprom;
+ struct falcon_spi_device spi_flash;
+ struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
@@ -138,29 +273,151 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
return &data->board;
}
+enum {
+ SIENA_STAT_tx_bytes,
+ SIENA_STAT_tx_good_bytes,
+ SIENA_STAT_tx_bad_bytes,
+ SIENA_STAT_tx_packets,
+ SIENA_STAT_tx_bad,
+ SIENA_STAT_tx_pause,
+ SIENA_STAT_tx_control,
+ SIENA_STAT_tx_unicast,
+ SIENA_STAT_tx_multicast,
+ SIENA_STAT_tx_broadcast,
+ SIENA_STAT_tx_lt64,
+ SIENA_STAT_tx_64,
+ SIENA_STAT_tx_65_to_127,
+ SIENA_STAT_tx_128_to_255,
+ SIENA_STAT_tx_256_to_511,
+ SIENA_STAT_tx_512_to_1023,
+ SIENA_STAT_tx_1024_to_15xx,
+ SIENA_STAT_tx_15xx_to_jumbo,
+ SIENA_STAT_tx_gtjumbo,
+ SIENA_STAT_tx_collision,
+ SIENA_STAT_tx_single_collision,
+ SIENA_STAT_tx_multiple_collision,
+ SIENA_STAT_tx_excessive_collision,
+ SIENA_STAT_tx_deferred,
+ SIENA_STAT_tx_late_collision,
+ SIENA_STAT_tx_excessive_deferred,
+ SIENA_STAT_tx_non_tcpudp,
+ SIENA_STAT_tx_mac_src_error,
+ SIENA_STAT_tx_ip_src_error,
+ SIENA_STAT_rx_bytes,
+ SIENA_STAT_rx_good_bytes,
+ SIENA_STAT_rx_bad_bytes,
+ SIENA_STAT_rx_packets,
+ SIENA_STAT_rx_good,
+ SIENA_STAT_rx_bad,
+ SIENA_STAT_rx_pause,
+ SIENA_STAT_rx_control,
+ SIENA_STAT_rx_unicast,
+ SIENA_STAT_rx_multicast,
+ SIENA_STAT_rx_broadcast,
+ SIENA_STAT_rx_lt64,
+ SIENA_STAT_rx_64,
+ SIENA_STAT_rx_65_to_127,
+ SIENA_STAT_rx_128_to_255,
+ SIENA_STAT_rx_256_to_511,
+ SIENA_STAT_rx_512_to_1023,
+ SIENA_STAT_rx_1024_to_15xx,
+ SIENA_STAT_rx_15xx_to_jumbo,
+ SIENA_STAT_rx_gtjumbo,
+ SIENA_STAT_rx_bad_gtjumbo,
+ SIENA_STAT_rx_overflow,
+ SIENA_STAT_rx_false_carrier,
+ SIENA_STAT_rx_symbol_error,
+ SIENA_STAT_rx_align_error,
+ SIENA_STAT_rx_length_error,
+ SIENA_STAT_rx_internal_error,
+ SIENA_STAT_rx_nodesc_drop_cnt,
+ SIENA_STAT_COUNT
+};
+
/**
* struct siena_nic_data - Siena NIC state
- * @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
- * @hwmon: Hardware monitor state
+ * @stats: Hardware statistics
*/
struct siena_nic_data {
- struct efx_mcdi_iface mcdi;
int wol_filter_id;
-#ifdef CONFIG_SFC_MCDI_MON
- struct efx_mcdi_mon hwmon;
-#endif
+ u64 stats[SIENA_STAT_COUNT];
};
-#ifdef CONFIG_SFC_MCDI_MON
-static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
-{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->hwmon;
-}
-#endif
+enum {
+ EF10_STAT_tx_bytes,
+ EF10_STAT_tx_packets,
+ EF10_STAT_tx_pause,
+ EF10_STAT_tx_control,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_lt64,
+ EF10_STAT_tx_64,
+ EF10_STAT_tx_65_to_127,
+ EF10_STAT_tx_128_to_255,
+ EF10_STAT_tx_256_to_511,
+ EF10_STAT_tx_512_to_1023,
+ EF10_STAT_tx_1024_to_15xx,
+ EF10_STAT_tx_15xx_to_jumbo,
+ EF10_STAT_rx_bytes,
+ EF10_STAT_rx_bytes_minus_good_bytes,
+ EF10_STAT_rx_good_bytes,
+ EF10_STAT_rx_bad_bytes,
+ EF10_STAT_rx_packets,
+ EF10_STAT_rx_good,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_pause,
+ EF10_STAT_rx_control,
+ EF10_STAT_rx_unicast,
+ EF10_STAT_rx_multicast,
+ EF10_STAT_rx_broadcast,
+ EF10_STAT_rx_lt64,
+ EF10_STAT_rx_64,
+ EF10_STAT_rx_65_to_127,
+ EF10_STAT_rx_128_to_255,
+ EF10_STAT_rx_256_to_511,
+ EF10_STAT_rx_512_to_1023,
+ EF10_STAT_rx_1024_to_15xx,
+ EF10_STAT_rx_15xx_to_jumbo,
+ EF10_STAT_rx_gtjumbo,
+ EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_overflow,
+ EF10_STAT_rx_align_error,
+ EF10_STAT_rx_length_error,
+ EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_COUNT
+};
+
+/**
+ * struct efx_ef10_nic_data - EF10 architecture NIC state
+ * @mcdi_buf: DMA buffer for MCDI
+ * @warm_boot_count: Last seen MC warm boot count
+ * @vi_base: Absolute index of first VI in this function
+ * @n_allocated_vis: Number of VIs allocated to this function
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @rx_rss_context: Firmware handle for our RSS context
+ * @stats: Hardware statistics
+ * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ * after MC reboot
+ * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
+ * %MC_CMD_GET_CAPABILITIES response)
+ */
+struct efx_ef10_nic_data {
+ struct efx_buffer mcdi_buf;
+ u16 warm_boot_count;
+ unsigned int vi_base;
+ unsigned int n_allocated_vis;
+ bool must_realloc_vis;
+ bool must_restore_filters;
+ u32 rx_rss_context;
+ u64 stats[EF10_STAT_COUNT];
+ bool workaround_35388;
+ bool must_check_datapath_caps;
+ u32 datapath_caps;
+};
/*
* On the SFC9000 family each port is associated with 1 PCI physical
@@ -263,6 +520,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_nic_type;
/**************************************************************************
*
@@ -274,35 +532,123 @@ extern const struct efx_nic_type siena_a0_nic_type;
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
-extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_write(tx_queue);
+}
/* RX data path */
-extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
-extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
/* Event data path */
-extern int efx_nic_probe_eventq(struct efx_channel *channel);
-extern void efx_nic_init_eventq(struct efx_channel *channel);
-extern void efx_nic_fini_eventq(struct efx_channel *channel);
-extern void efx_nic_remove_eventq(struct efx_channel *channel);
-extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
-extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
-extern bool efx_nic_event_present(struct efx_channel *channel);
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+ return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ channel->efx->type->ev_read_ack(channel);
+}
+extern void efx_nic_event_test_start(struct efx_channel *channel);
-/* MAC/PHY */
-extern void falcon_drain_tx_fifo(struct efx_nic *efx);
-extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern bool falcon_xmac_check_fault(struct efx_nic *efx);
-extern int falcon_reconfigure_xmac(struct efx_nic *efx);
-extern void falcon_update_stats_xmac(struct efx_nic *efx);
+/* Falcon/Siena queue operations */
+extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+extern int efx_farch_ev_probe(struct efx_channel *channel);
+extern int efx_farch_ev_init(struct efx_channel *channel);
+extern void efx_farch_ev_fini(struct efx_channel *channel);
+extern void efx_farch_ev_remove(struct efx_channel *channel);
+extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
+extern void efx_farch_ev_read_ack(struct efx_channel *channel);
+extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+
+/* Falcon/Siena filter operations */
+extern int efx_farch_filter_table_probe(struct efx_nic *efx);
+extern void efx_farch_filter_table_restore(struct efx_nic *efx);
+extern void efx_farch_filter_table_remove(struct efx_nic *efx);
+extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+extern s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+extern int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
@@ -322,16 +668,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
*stat = diff;
}
-/* Interrupts and test events */
+/* Interrupts */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_event_test_start(struct efx_channel *channel);
extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
-extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
-extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
-extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+/* Falcon/Siena interrupts */
+extern void efx_farch_irq_enable_master(struct efx_nic *efx);
+extern void efx_farch_irq_test_generate(struct efx_nic *efx);
+extern void efx_farch_irq_disable_master(struct efx_nic *efx);
+extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -345,69 +693,47 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void siena_prepare_flush(struct efx_nic *efx);
+extern int efx_farch_fini_dmaq(struct efx_nic *efx);
extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void
-efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_nic_init_common(struct efx_nic *efx);
-extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
+extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+extern void efx_farch_init_common(struct efx_nic *efx);
+extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
+{
+ efx->type->rx_push_indir_table(efx);
+}
+extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len);
+ unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
/* Tests */
-struct efx_nic_register_test {
+struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
-extern int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs);
+extern int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- */
+extern size_t
+efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+extern void
+efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate);
+
+#define EFX_MAX_FLUSH_TIME 5000
-#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
-#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
-
-/* Retrieve statistic from statistics block */
-#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
- if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
- (efx)->mac_stats.efx_stat += le16_to_cpu( \
- *((__force __le16 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
- (efx)->mac_stats.efx_stat += le32_to_cpu( \
- *((__force __le32 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else \
- (efx)->mac_stats.efx_stat += le64_to_cpu( \
- *((__force __le64 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- } while (0)
-
-#define FALCON_MAC_STATS_SIZE 0x100
-
-#define MAC_DATA_LBN 0
-#define MAC_DATA_WIDTH 32
-
-extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
-
-extern void falcon_poll_xmac(struct efx_nic *efx);
+extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 11d148cd8441..45eeb7075156 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -47,21 +47,4 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
-/****************************************************************************
- * Siena managed PHYs
- */
-extern const struct efx_phy_operations efx_mcdi_phy_ops;
-
-extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 *value_out, u32 *status_out);
-extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 value, u32 *status_out);
-extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl);
-extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
-extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
-
#endif
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b495394a6dfa..03acf57df045 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -46,7 +46,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "nic.h"
/* Maximum number of events expected to make up a PTP event */
@@ -294,8 +294,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
- MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
+ MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -311,9 +310,10 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
efx->ptp_data->channel->channel);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
@@ -329,9 +329,10 @@ static int efx_ptp_enable(struct efx_nic *efx)
*/
static int efx_ptp_disable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
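
Every conversion in this file follows the same MCDI_DECLARE_BUF() shape; a sketch, assuming the RESET_STATS op and its length macro from mcdi_pcol.h:

static int demo_ptp_reset_stats(struct efx_nic *efx)
{
	/* Declares a correctly sized, dword-aligned request buffer */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_RESET_STATS_LEN);

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_RESET_STATS);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}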
@@ -389,14 +390,14 @@ static void efx_ptp_send_times(struct efx_nic *efx,
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
now.ts_real.tv_nsec);
/* Update host time in NIC memory */
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+ efx->type->ptp_write_host_time(efx, host_time);
}
*last_time = now;
}
/* Read a timeset from the MC's results and partial process. */
-static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+ struct efx_ptp_timeset *timeset)
{
unsigned start_ns, end_ns;
@@ -425,12 +426,14 @@ static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
* busy. A number of readings are taken so that, hopefully, at least one good
* synchronisation will be seen in the results.
*/
-static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
- size_t response_length,
- const struct pps_event_time *last_time)
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+ size_t response_length,
+ const struct pps_event_time *last_time)
{
- unsigned number_readings = (response_length /
- MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
+ unsigned number_readings =
+ MCDI_VAR_ARRAY_LEN(response_length,
+ PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
unsigned total;
unsigned ngood = 0;
@@ -447,8 +450,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
 * appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
- efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
- synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+ efx_ptp_read_timeset(
+ MCDI_ARRAY_STRUCT_PTR(synch_buf,
+ PTP_OUT_SYNCHRONIZE_TIMESET, i),
+ &ptp->timeset[i]);
}
/* Find the last good host-MC synchronization result. The MC times
@@ -518,7 +523,7 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
+ MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
size_t response_length;
int rc;
unsigned long timeout;
@@ -527,17 +532,17 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
int *start = ptp->start.addr;
MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
num_readings);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
- (u32)ptp->start.dma_addr);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
- (u32)((u64)ptp->start.dma_addr >> 32));
+ MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
+ ptp->start.dma_addr);
/* Clear flag that signals MC ready */
ACCESS_ONCE(*start) = 0;
- efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
- MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ EFX_BUG_ON_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
@@ -564,15 +569,15 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
/* Transmit a PTP packet, via the MCDI interface, to the wire. */
static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
{
- u8 *txbuf = efx->ptp_data->txbuf;
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
struct skb_shared_hwtstamps timestamps;
int rc = -EIO;
- /* MCDI driver requires word aligned lengths */
- size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
- u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
+ MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+ size_t len;
- MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
- MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
if (skb_shinfo(skb)->nr_frags != 0) {
rc = skb_linearize(skb);
if (rc != 0)
@@ -585,10 +590,12 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
}
skb_copy_from_linear_data(skb,
- &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
- len);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
- sizeof(txtime), &len);
+ MCDI_PTR(ptp_data->txbuf,
+ PTP_IN_TRANSMIT_PACKET),
+ skb->len);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+ ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+ txtime, sizeof(txtime), &len);
if (rc != 0)
goto fail;
@@ -872,7 +879,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
if (!efx->ptp_data)
return -ENOMEM;
- rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
+ rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
@@ -1359,7 +1366,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1373,9 +1380,8 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
(PPB_EXTRA_BITS + MAX_PPB_BITS));
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
- (u32)(adjustment_ns >> 32));
+ MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
@@ -1394,11 +1400,11 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
struct timespec delta_ts = ns_to_timespec(delta);
- u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1411,11 +1417,12 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
- u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 326a28637f3c..efa3612affca 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 6af9cfda50fb..4a596725023f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
+#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -60,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
return page_address(buf->page) + buf->page_offset;
}
-static inline u32 efx_rx_buf_hash(const u8 *eh)
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
- /* The ethernet header is always directly after any hash. */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
- return __le32_to_cpup((const __le32 *)(eh - 4));
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
- const u8 *data = eh - 4;
+ const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
unsigned int fill_level, batch_size;
int space, rc = 0;
+ if (!rx_queue->refill_enabled)
+ return;
+
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
@@ -435,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
+ skb->rxhash = efx_rx_buf_hash(efx, eh);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
@@ -523,10 +526,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
- efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
- unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
- unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+ unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either
* the hardware or the driver is broken.
@@ -551,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
return;
}
- if (n_frags == 1)
+ if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
@@ -564,8 +568,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
prefetch(efx_rx_buf_va(rx_buf));
- rx_buf->page_offset += efx->type->rx_buffer_hash_size;
- rx_buf->len -= efx->type->rx_buffer_hash_size;
+ rx_buf->page_offset += efx->rx_prefix_size;
+ rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
@@ -577,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0)
break;
- efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
- rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
@@ -630,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel)
efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
+ /* Read length from the prefix if necessary. This already
+ * excludes the length of the prefix itself.
+ */
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ rx_buf->len = le16_to_cpup((__le16 *)
+ (eh + efx->rx_packet_len_offset));
+
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
@@ -738,9 +749,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
/* Set up RX descriptor ring */
- rx_queue->enabled = true;
efx_nic_init_rx(rx_queue);
}
@@ -753,11 +764,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- /* A flush failure might have left rx_queue->enabled */
- rx_queue->enabled = false;
-
del_timer_sync(&rx_queue->slow_fill);
- efx_nic_fini_rx(rx_queue);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
@@ -803,3 +810,130 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_spec spec;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ int nhoff;
+ int rc;
+
+ nhoff = skb_network_offset(skb);
+
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(struct vlan_hdr));
+ if (((const struct vlan_hdr *)(skb->data + nhoff))->
+ h_vlan_encapsulated_proto != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ /* This is IP over 802.1q VLAN. We can't filter on the
+ * IP 5-tuple and the vlan together, so just strip the
+ * vlan header and filter on the IP part.
+ */
+ nhoff += sizeof(struct vlan_hdr);
+ } else if (skb->protocol != htons(ETH_P_IP)) {
+ return -EPROTONOSUPPORT;
+ }
+
+ /* RFS must validate the IP header length before calling us */
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
+ rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+ ip->daddr, ports[1], ip->saddr, ports[0]);
+ if (rc)
+ return rc;
+
+ rc = efx->type->filter_rfs_insert(efx, &spec);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ efx->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+ &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+ rxq_index, flow_id, rc);
+
+ return rc;
+}
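
For context (not part of this hunk): the RFS core reaches efx_filter_rfs() through the ndo_rx_flow_steer hook, roughly as below; the surrounding ops structure is elided.

static const struct net_device_ops demo_netdev_ops = {
	/* ... the usual ndo_* methods ... */
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};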
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+{
+ bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+ unsigned int index, size;
+ u32 flow_id;
+
+ if (!spin_trylock_bh(&efx->filter_lock))
+ return false;
+
+ expire_one = efx->type->filter_rfs_expire_one;
+ index = efx->rps_expire_index;
+ size = efx->type->max_rx_ip_filters;
+ while (quota--) {
+ flow_id = efx->rps_flow_id[index];
+ if (expire_one(efx, flow_id, index))
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [flow %u]\n",
+ index, flow_id);
+ if (++index == size)
+ index = 0;
+ }
+ efx->rps_expire_index = index;
+
+ spin_unlock_bh(&efx->filter_lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
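
A usage sketch with invented values: a filter steering IPv4 multicast 224.0.0.1 satisfies the ETHER_TYPE+LOC_HOST test above.

static bool demo_spec_is_mc(void)
{
	struct efx_filter_spec spec;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	efx_filter_set_ipv4_local(&spec, IPPROTO_UDP,
				  htonl(0xe0000001), htons(319));
	return efx_filter_is_mc_recipient(&spec);	/* true */
}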
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 2069f51b2aa9..144bbff5a4ae 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
static int efx_poll_loopback(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
- struct efx_channel *channel;
- /* NAPI polling is not enabled, so process channels
- * synchronously */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- }
return atomic_read(&state->rx_good) == state->packet_count;
}
@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
- } else {
- struct efx_channel *channel = efx_get_channel(efx, 0);
- if (channel->work_pending)
- efx_process_channel_now(channel);
}
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index aed24b736059..87698ae0bf75 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 8c91775e3c5f..d034bcd124ef 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -18,8 +18,7 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "spi.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
@@ -30,7 +29,6 @@
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -52,81 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
channel->channel);
}
-static int siena_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
- addr, value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
-
-static int siena_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint16_t value;
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
- addr, &value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (int)value;
-}
-
-/* This call is responsible for hooking in the MAC and PHY operations */
-static int siena_probe_port(struct efx_nic *efx)
-{
- int rc;
-
- /* Hook in PHY operations table */
- efx->phy_op = &efx_mcdi_phy_ops;
-
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = siena_mdio_read;
- efx->mdio.mdio_write = siena_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
- rc = efx->phy_op->probe(efx);
- if (rc != 0)
- return rc;
-
- /* Allocate buffer for stats */
- rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- MC_CMD_MAC_NSTATS * sizeof(u64));
- if (rc)
- return rc;
- netif_dbg(efx, probe, efx->net_dev,
- "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
-
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
-
- return 0;
-}
-
-static void siena_remove_port(struct efx_nic *efx)
-{
- efx->phy_op->remove(efx);
- efx_nic_free_buffer(efx, &efx->stats_buffer);
-}
-
void siena_prepare_flush(struct efx_nic *efx)
{
if (efx->fc_disable++ == 0)
@@ -139,7 +62,7 @@ void siena_finish_flush(struct efx_nic *efx)
efx_mcdi_set_mac(efx);
}
-static const struct efx_nic_register_test siena_register_tests[] = {
+static const struct efx_farch_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
@@ -178,16 +101,16 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
/* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does.
*/
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
if (rc)
goto out;
tests->registers =
- efx_nic_test_registers(efx, siena_register_tests,
- ARRAY_SIZE(siena_register_tests))
+ efx_farch_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
? -1 : 1;
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
out:
rc2 = efx_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
@@ -200,11 +123,6 @@ out:
**************************************************************************
*/
-static enum reset_type siena_map_reset_reason(enum reset_type reason)
-{
- return RESET_TYPE_RECOVER_OR_ALL;
-}
-
static int siena_map_reset_flags(u32 *flags)
{
enum {
@@ -230,21 +148,6 @@ static int siena_map_reset_flags(u32 *flags)
return -EINVAL;
}
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
-{
- int rc;
-
- /* Recover from a failed assertion pre-reset */
- rc = efx_mcdi_handle_assertion(efx);
- if (rc)
- return rc;
-
- if (method == RESET_TYPE_WORLD)
- return efx_mcdi_reset_mc(efx);
- else
- return efx_mcdi_reset_port(efx);
-}
-
#ifdef CONFIG_EEH
/* When a PCI device is isolated from the bus, a subsequent MMIO read is
* required for the kernel EEH mechanisms to notice. As the Solarflare driver
@@ -274,19 +177,25 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
return rc;
}
-static void siena_dimension_resources(struct efx_nic *efx)
+static int siena_dimension_resources(struct efx_nic *efx)
{
/* Each port has a small block of internal SRAM dedicated to
* the buffer table and descriptor caches. In theory we can
* map both blocks to one port, but we don't.
*/
- efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ return 0;
+}
+
+static unsigned int siena_mem_map_size(struct efx_nic *efx)
+{
+ return FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
}
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = false;
efx_oword_t reg;
int rc;
@@ -296,38 +205,24 @@ static int siena_probe_nic(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
+ efx->max_channels = EFX_MAX_CHANNELS;
+
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- efx_mcdi_init(efx);
-
- /* Recover from a failed assertion before probing */
- rc = efx_mcdi_handle_assertion(efx);
+ rc = efx_mcdi_init(efx);
if (rc)
goto fail1;
- /* Let the BMC know that the driver is now in charge of link and
- * filter settings. We must do this before we reset the NIC */
- rc = efx_mcdi_drv_attach(efx, true, &already_attached);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
- goto fail2;
- }
- if (already_attached)
- /* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
-
/* Now we can reset the NIC */
- rc = siena_reset_hw(efx, RESET_TYPE_ALL);
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
@@ -336,7 +231,8 @@ static int siena_probe_nic(struct efx_nic *efx)
siena_init_wol(efx);
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -371,8 +267,7 @@ fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
- efx_mcdi_drv_attach(efx, false, NULL);
-fail2:
+ efx_mcdi_fini(efx);
fail1:
kfree(efx->nic_data);
return rc;
@@ -448,7 +343,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -458,144 +353,192 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->irq_status);
- siena_reset_hw(efx, RESET_TYPE_ALL);
+ efx_mcdi_reset(efx, RESET_TYPE_ALL);
- /* Relinquish the device back to the BMC */
- efx_mcdi_drv_attach(efx, false, NULL);
+ efx_mcdi_fini(efx);
/* Tear down the private nic state */
kfree(efx->nic_data);
efx->nic_data = NULL;
}
-#define STATS_GENERATION_INVALID ((__force __le64)(-1))
+#define SIENA_DMA_STAT(ext_name, mcdi_name) \
+ [SIENA_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define SIENA_OTHER_STAT(ext_name) \
+ [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
+ SIENA_DMA_STAT(tx_bytes, TX_BYTES),
+ SIENA_OTHER_STAT(tx_good_bytes),
+ SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
+ SIENA_DMA_STAT(tx_packets, TX_PKTS),
+ SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ SIENA_DMA_STAT(tx_64, TX_64_PKTS),
+ SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
+ SIENA_OTHER_STAT(tx_collision),
+ SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
+ SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(rx_bytes, RX_BYTES),
+ SIENA_OTHER_STAT(rx_good_bytes),
+ SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
+ SIENA_DMA_STAT(rx_packets, RX_PKTS),
+ SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ SIENA_DMA_STAT(rx_64, RX_64_PKTS),
+ SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
+ SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+};
+static const unsigned long siena_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
+};
+
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+ siena_stat_mask, names);
+}
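
The stats rework above is table-driven: each SIENA_DMA_STAT() entry records a name, a 64-bit width and the byte offset (8 * MCDI index) of that counter in the DMA buffer, and the generic efx_nic_describe_stats()/efx_nic_update_stats() helpers walk the table instead of dozens of per-stat copy lines. A minimal sketch of the pattern, with hypothetical names rather than the driver's real efx_hw_stat_desc:

	struct hw_stat_desc {
		const char *name;
		unsigned int dma_width;	/* 0: derived in software, no DMA slot */
		unsigned int offset;	/* byte offset into the DMA buffer */
	};

	static void table_update_stats(const struct hw_stat_desc *desc,
				       size_t count, u64 *stats,
				       const void *dma_buf)
	{
		size_t i;

		for (i = 0; i < count; i++)
			if (desc[i].dma_width)	/* skip software-derived entries */
				stats[i] = le64_to_cpu(*(const __le64 *)
						       ((const u8 *)dma_buf +
							desc[i].offset));
	}

Width-0 entries (SIENA_OTHER_STAT) have no DMA slot; they are derived in software afterwards, as siena_try_update_nic_stats() does below.
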
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
__le64 *dma_stats;
- struct efx_mac_stats *mac_stats;
__le64 generation_start, generation_end;
- mac_stats = &efx->mac_stats;
dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
- if (generation_end == STATS_GENERATION_INVALID)
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
-
-#define MAC_STAT(M, D) \
- mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
-
- MAC_STAT(tx_bytes, TX_BYTES);
- MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
- MAC_STAT(tx_packets, TX_PKTS);
- MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
- MAC_STAT(tx_pause, TX_PAUSE_PKTS);
- MAC_STAT(tx_control, TX_CONTROL_PKTS);
- MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
- MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
- MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
- MAC_STAT(tx_lt64, TX_LT64_PKTS);
- MAC_STAT(tx_64, TX_64_PKTS);
- MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
- MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
- MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
- MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
- MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
- MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
- mac_stats->tx_collision = 0;
- MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
- MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
- MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
- MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
- MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
- mac_stats->tx_collision = (mac_stats->tx_single_collision +
- mac_stats->tx_multiple_collision +
- mac_stats->tx_excessive_collision +
- mac_stats->tx_late_collision);
- MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
- MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
- MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
- MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
- MAC_STAT(rx_bytes, RX_BYTES);
- MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->rx_good_bytes,
- mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
- MAC_STAT(rx_packets, RX_PKTS);
- MAC_STAT(rx_good, RX_GOOD_PKTS);
- MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
- MAC_STAT(rx_pause, RX_PAUSE_PKTS);
- MAC_STAT(rx_control, RX_CONTROL_PKTS);
- MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
- MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
- MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
- MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
- MAC_STAT(rx_64, RX_64_PKTS);
- MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
- MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
- MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
- MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
- MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
- MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
- mac_stats->rx_bad_lt64 = 0;
- mac_stats->rx_bad_64_to_15xx = 0;
- mac_stats->rx_bad_15xx_to_jumbo = 0;
- MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
- MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
- mac_stats->rx_missed = 0;
- MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
- MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
- MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
- MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
- MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
- mac_stats->rx_good_lt64 = 0;
-
- efx->n_rx_nodesc_drop_cnt =
- le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
-
-#undef MAC_STAT
-
+ efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+ stats, efx->stats_buffer.addr, false);
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
+ /* Update derived statistics */
+ efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
+ stats[SIENA_STAT_tx_bytes] -
+ stats[SIENA_STAT_tx_bad_bytes]);
+ stats[SIENA_STAT_tx_collision] =
+ stats[SIENA_STAT_tx_single_collision] +
+ stats[SIENA_STAT_tx_multiple_collision] +
+ stats[SIENA_STAT_tx_excessive_collision] +
+ stats[SIENA_STAT_tx_late_collision];
+ efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
+ stats[SIENA_STAT_rx_bytes] -
+ stats[SIENA_STAT_rx_bad_bytes]);
return 0;
}
-static void siena_update_nic_stats(struct efx_nic *efx)
+static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
int retry;
	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
- return;
+ break;
udelay(100);
}
- /* Use the old values instead */
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
+ core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
+ core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[SIENA_STAT_rx_multicast];
+ core_stats->collisions = stats[SIENA_STAT_tx_collision];
+ core_stats->rx_length_errors =
+ stats[SIENA_STAT_rx_gtjumbo] +
+ stats[SIENA_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
+ core_stats->tx_window_errors =
+ stats[SIENA_STAT_tx_late_collision];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[SIENA_STAT_rx_symbol_error]);
+ core_stats->tx_errors = (core_stats->tx_window_errors +
+ stats[SIENA_STAT_tx_bad]);
+ }
+
+ return SIENA_STAT_COUNT;
}
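
The consistency check in siena_try_update_nic_stats() is a seqcount in disguise: the MC writes GENERATION_START, the counters, then GENERATION_END, and the host reads in the opposite order with read barriers in between; equal generation words mean the snapshot was not overwritten mid-read, otherwise the retry loop above tries again (100 x 100us, about 10ms). The reader side, distilled; GEN_START_IDX/GEN_END_IDX are hypothetical stand-ins for the MC_CMD_MAC_GENERATION_* indices:

	static int read_stats_snapshot(const __le64 *dma_stats, u64 *out,
				       size_t n)
	{
		__le64 gen_end, gen_start;
		size_t i;

		gen_end = dma_stats[GEN_END_IDX];
		if (gen_end == EFX_MC_STATS_GENERATION_INVALID)
			return 0;	/* MC has not published statistics yet */
		rmb();			/* read the counters after the end marker */
		for (i = 0; i < n; i++)
			out[i] = le64_to_cpu(dma_stats[i]);
		rmb();			/* read the start marker after the counters */
		gen_start = dma_stats[GEN_START_IDX];
		return gen_end == gen_start ? 0 : -EAGAIN;
	}
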
-static void siena_start_nic_stats(struct efx_nic *efx)
+static int siena_mac_reconfigure(struct efx_nic *efx)
{
- __le64 *dma_stats = efx->stats_buffer.addr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
+ int rc;
- dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
+ BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN !=
+ MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
+ sizeof(efx->multicast_hash));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
-}
+ efx_farch_filter_sync_rx_mode(efx);
-static void siena_stop_nic_stats(struct efx_nic *efx)
-{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ rc = efx_mcdi_set_mac(efx);
+ if (rc != 0)
+ return rc;
+
+ memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
+ efx->multicast_hash.byte, sizeof(efx->multicast_hash));
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
}
/**************************************************************************
@@ -669,6 +612,241 @@ static void siena_init_wol(struct efx_nic *efx)
}
}
+/**************************************************************************
+ *
+ * MCDI
+ *
+ **************************************************************************
+ */
+
+#define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
+#define MCDI_DOORBELL(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
+#define MCDI_STATUS(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
+
+static void siena_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned int i;
+ unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
+
+ EFX_BUG_ON_PARANOID(hdr_len != 4);
+
+ efx_writed(efx, hdr, pdu);
+
+ for (i = 0; i < inlen_dw; i++)
+ efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
+
+ /* Ensure the request is written out before the doorbell */
+ wmb();
+
+ /* ring the doorbell with a distinctive value */
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
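
The wmb() above carries the correctness of the whole mailbox: without it the doorbell write could become visible to the MC before the header and SDU writes, and the MC would read a half-written request. The distinctive doorbell value is simply easy to spot in bus traces. The idiom in isolation (register offsets hypothetical):

	static void mailbox_post(struct efx_nic *efx, const efx_dword_t *msg,
				 unsigned int len_dw, unsigned int pdu_reg,
				 unsigned int doorbell_reg)
	{
		unsigned int i;

		for (i = 0; i < len_dw; i++)
			efx_writed(efx, &msg[i], pdu_reg + 4 * i);
		wmb();	/* whole request visible before the device is told to look */
		_efx_writed(efx, (__force __le32)0x45789abc, doorbell_reg);
	}
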
+
+static bool siena_mcdi_poll_response(struct efx_nic *efx)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ efx_dword_t hdr;
+
+ efx_readd(efx, &hdr, pdu);
+
+ /* All 1's indicates that shared memory is in reset (and is
+	 * not a valid hdr). Wait for it to come out of reset before
+ * completing the command
+ */
+ return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
+ EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
+ int i;
+
+ for (i = 0; i < outlen_dw; i++)
+ efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
+}
+
+static int siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
+ efx_dword_t reg;
+ u32 value;
+
+ efx_readd(efx, &reg, addr);
+ value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+ if (value == 0)
+ return 0;
+
+ EFX_ZERO_DWORD(reg);
+ efx_writed(efx, &reg, addr);
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * copies that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
+ nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return -EINTR;
+ else
+ return -EIO;
+}
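
siena_mcdi_poll_reboot() doubles as a handshake: a non-zero status word means the MC has restarted since the last poll, and writing zero back acknowledges it so each reboot is reported exactly once. A caller-side sketch of how such a hook is consumed through the method table (both handlers are hypothetical):

	static int check_mc_state(struct efx_nic *efx)
	{
		int rc = efx->type->mcdi_poll_reboot(efx);

		if (rc == -EINTR)			/* MC hit an assertion */
			return handle_mc_assert(efx);	/* hypothetical */
		if (rc == -EIO)				/* MC rebooted otherwise */
			return schedule_mc_reset(efx);	/* hypothetical */
		return 0;				/* no reboot since last poll */
	}
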
+
+/**************************************************************************
+ *
+ * MTD
+ *
+ **************************************************************************
+ */
+
+#ifdef CONFIG_SFC_MTD
+
+struct siena_nvram_type_info {
+ int port;
+ const char *name;
+};
+
+static const struct siena_nvram_type_info siena_nvram_types[] = {
+ [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
+ [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
+ [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
+};
+
+static int siena_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ const struct siena_nvram_type_info *info;
+ size_t size, erase_size;
+ bool protected;
+ int rc;
+
+ if (type >= ARRAY_SIZE(siena_nvram_types) ||
+ siena_nvram_types[type].name == NULL)
+ return -ENODEV;
+
+ info = &siena_nvram_types[type];
+
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+ part->common.dev_type_name = "Siena NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *parts,
+ size_t n_parts)
+{
+ uint16_t fw_subtype_list[
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
+ size_t i;
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < n_parts; i++)
+ parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
+
+ return 0;
+}
+
+static int siena_mtd_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_mtd_partition *parts;
+ u32 nvram_types;
+ unsigned int type;
+ size_t n_parts;
+ int rc;
+
+ ASSERT_RTNL();
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ type = 0;
+ n_parts = 0;
+
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = siena_mtd_probe_partition(efx, &parts[n_parts],
+ type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
+ if (rc)
+ goto fail;
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
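
siena_mtd_probe() above uses a standard bitmask walk: hweight32() sizes the kcalloc() to the number of set bits, then the loop peels one bit per iteration so partition slots stay in step with NVRAM type numbers. The same idiom in isolation (handle_type() is a hypothetical callback; in-kernel code could equally use for_each_set_bit()):

	static void for_each_nvram_type(u32 mask)
	{
		unsigned int type = 0;

		while (mask) {
			if (mask & 1)
				handle_type(type);	/* hypothetical per-type hook */
			type++;
			mask >>= 1;
		}
	}
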
+
+/**************************************************************************
+ *
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
/**************************************************************************
*
@@ -678,6 +856,7 @@ static void siena_init_wol(struct efx_nic *efx)
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
@@ -688,44 +867,94 @@ const struct efx_nic_type siena_a0_nic_type = {
#else
.monitor = NULL,
#endif
- .map_reset_reason = siena_map_reset_reason,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
- .reset = siena_reset_hw,
- .probe_port = siena_probe_port,
- .remove_port = siena_remove_port,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
+ .describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
- .start_stats = siena_start_nic_stats,
- .stop_stats = siena_stop_nic_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
- .reconfigure_mac = efx_mcdi_mac_reconfigure,
+ .reconfigure_mac = siena_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
- .reconfigure_port = efx_mcdi_phy_reconfigure,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = siena_mcdi_request,
+ .mcdi_poll_response = siena_mcdi_poll_response,
+ .mcdi_read_response = siena_mcdi_read_response,
+ .mcdi_poll_reboot = siena_mcdi_poll_reboot,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = siena_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = siena_ptp_write_host_time,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = (FR_CZ_MC_TREG_SMEM +
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
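
siena_a0_nic_type shows the method-table pattern the rest of this patch is built around: core code calls efx->type->tx_probe() and friends without knowing the hardware generation, so Siena can mix shared efx_farch_* datapath hooks, shared efx_mcdi_* management hooks and its own siena_* glue. A cut-down sketch of the idea; the struct here is hypothetical and far smaller than the real efx_nic_type:

	struct nic_ops {
		int (*probe)(struct efx_nic *efx);
		int (*reset)(struct efx_nic *efx, enum reset_type method);
		size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats);
	};

	static const struct nic_ops example_nic_type = {
		.probe		= siena_probe_nic,
		.reset		= efx_mcdi_reset,
		.update_stats	= siena_update_nic_stats,
	};

Supporting a new controller generation then means supplying another such table rather than editing the core driver.
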
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 90f8d1604f5f..0c38f926871e 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2010-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,7 @@
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -197,8 +197,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
unsigned *vi_scale_out, unsigned *vf_total_out)
{
- u8 inbuf[MC_CMD_SRIOV_IN_LEN];
- u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
unsigned vi_scale, vf_total;
size_t outlen;
int rc;
@@ -240,64 +240,55 @@ static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
unsigned int count)
{
- u8 *inbuf, *record;
- unsigned int used;
- u32 from_rid, from_hi, from_lo;
+ MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
+ MCDI_DECLARE_STRUCT_PTR(record);
+ unsigned int index, used;
+ u64 from_addr;
+ u32 from_rid;
int rc;
mb(); /* Finish writing source/reading dest before DMA starts */
- used = MC_CMD_MEMCPY_IN_LEN(count);
- if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
+ if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
return -ENOBUFS;
+ used = MC_CMD_MEMCPY_IN_LEN(count);
- /* Allocate room for the largest request */
- inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
- if (inbuf == NULL)
- return -ENOMEM;
-
- record = inbuf;
- MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
- while (count-- > 0) {
+ for (index = 0; index < count; index++) {
+ record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
+ count);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
req->to_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
- (u32)req->to_addr);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
- (u32)(req->to_addr >> 32));
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
+ req->to_addr);
if (req->from_buf == NULL) {
from_rid = req->from_rid;
- from_lo = (u32)req->from_addr;
- from_hi = (u32)(req->from_addr >> 32);
+ from_addr = req->from_addr;
} else {
- if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
+ if (WARN_ON(used + req->length >
+ MCDI_CTL_SDU_LEN_MAX_V1)) {
rc = -ENOBUFS;
goto out;
}
from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
- from_lo = used;
- from_hi = 0;
- memcpy(inbuf + used, req->from_buf, req->length);
+ from_addr = used;
+ memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
+ req->length);
used += req->length;
}
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
- from_lo);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
- from_hi);
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
+ from_addr);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
req->length);
++req;
- record += MC_CMD_MEMCPY_IN_RECORD_LEN;
}
rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
- kfree(inbuf);
-
mb(); /* Don't write source/read dest before DMA is complete */
return rc;
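
The efx_sriov_memcpy() conversion is this commit's MCDI buffer discipline in miniature: MCDI_DECLARE_BUF() puts a correctly sized and typed request on the stack, removing a kmalloc()/kfree() pair and its error path, and MCDI_SET_QWORD() replaces hand-split _LO/_HI dword writes. The shape of a typical call, with a hypothetical command name:

	static int example_mcdi_call(struct efx_nic *efx, u64 dma_addr)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_EXAMPLE_IN_LEN);	/* hypothetical cmd */

		MCDI_SET_QWORD(inbuf, EXAMPLE_IN_ADDR, dma_addr);
		return efx_mcdi_rpc(efx, MC_CMD_EXAMPLE, inbuf, sizeof(inbuf),
				    NULL, 0, NULL);
	}
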
@@ -473,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
++vf->msg_seqno;
- efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
- &event);
+ efx_farch_generate_event(efx,
+ EFX_VI_BASE + vf->index * efx_vf_size(efx),
+ &event);
}
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
@@ -684,16 +676,12 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
unsigned timeout = HZ;
unsigned index, rxqs_count;
- __le32 *rxqs;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
int rc;
BUILD_BUG_ON(VF_MAX_RX_QUEUES >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
- if (rxqs == NULL)
- return VFDI_RC_ENOMEM;
-
rtnl_lock();
siena_prepare_flush(efx);
rtnl_unlock();
@@ -708,14 +696,19 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
vf_offset + index);
efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
}
- if (test_bit(index, vf->rxq_mask))
- rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
+ if (test_bit(index, vf->rxq_mask)) {
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
+ }
}
atomic_set(&vf->rxq_retry_count, 0);
while (timeout && (vf->rxq_count || vf->txq_count)) {
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
- rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
+ NULL, 0, NULL);
WARN_ON(rc < 0);
timeout = wait_event_timeout(vf->flush_waitq,
@@ -725,8 +718,10 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
for (index = 0; index < count; ++index) {
if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
atomic_dec(&vf->rxq_retry_count);
- rxqs[rxqs_count++] =
- cpu_to_le32(vf_offset + index);
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
}
}
}
@@ -749,7 +744,6 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
}
efx_sriov_bufs(efx, vf->buftbl_base, NULL,
EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
- kfree(rxqs);
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
@@ -1004,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
- if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
+ if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
efx_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf);
}
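
The GFP flag added here is deliberate: probe-time callers in this patch pass GFP_KERNEL, but this allocation runs from a VF reset work item, where blocking on I/O-backed reclaim could stall behind the very device being reset, so it uses GFP_NOIO. A sketch of the rule of thumb (hypothetical helper, not driver code):

	static gfp_t dma_buf_gfp(bool in_reset_path)
	{
		/* reset/recovery paths must not recurse into I/O for reclaim */
		return in_reset_path ? GFP_NOIO : GFP_KERNEL;
	}
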
@@ -1248,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn));
- rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
+ rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
+ GFP_KERNEL);
if (rc)
goto fail;
@@ -1280,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
if (rc)
goto fail_cmd;
- rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
+ rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
+ GFP_KERNEL);
if (rc)
goto fail_status;
vfdi_status = efx->vfdi_status.addr;
@@ -1535,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
efx_sriov_usrev(efx, true);
(void)efx_sriov_cmd(efx, true, NULL, NULL);
- if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
+ if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
deleted file mode 100644
index 5431a1bbff5c..000000000000
--- a/drivers/net/ethernet/sfc/spi.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_SPI_H
-#define EFX_SPI_H
-
-#include "net_driver.h"
-
-/**************************************************************************
- *
- * Basic SPI command set and bit definitions
- *
- *************************************************************************/
-
-#define SPI_WRSR 0x01 /* Write status register */
-#define SPI_WRITE 0x02 /* Write data to memory array */
-#define SPI_READ 0x03 /* Read data from memory array */
-#define SPI_WRDI 0x04 /* Reset write enable latch */
-#define SPI_RDSR 0x05 /* Read status register */
-#define SPI_WREN 0x06 /* Set write enable latch */
-#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
-
-#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
-#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
-#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
-#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
-#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
-#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
-
-/**
- * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
- * @device_id: Controller's id for the device
- * @size: Size (in bytes)
- * @addr_len: Number of address bytes in read/write commands
- * @munge_address: Flag whether addresses should be munged.
- * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
- * use bit 3 of the command byte as address bit A8, rather
- * than having a two-byte address. If this flag is set, then
- * commands should be munged in this way.
- * @erase_command: Erase command (or 0 if sector erase not needed).
- * @erase_size: Erase sector size (in bytes)
- * Erase commands affect sectors with this size and alignment.
- * This must be a power of two.
- * @block_size: Write block size (in bytes).
- * Write commands are limited to blocks with this size and alignment.
- */
-struct efx_spi_device {
- int device_id;
- unsigned int size;
- unsigned int addr_len;
- unsigned int munge_address:1;
- u8 erase_command;
- unsigned int erase_size;
- unsigned int block_size;
-};
-
-static inline bool efx_spi_present(const struct efx_spi_device *spi)
-{
- return spi->size != 0;
-}
-
-int falcon_spi_cmd(struct efx_nic *efx,
- const struct efx_spi_device *spi, unsigned int command,
- int address, const void *in, void *out, size_t len);
-int falcon_spi_wait_write(struct efx_nic *efx,
- const struct efx_spi_device *spi);
-int falcon_spi_read(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, u8 *buffer);
-int falcon_spi_write(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer);
-
-/*
- * SFC4000 flash is partitioned into:
- * 0-0x400 chip and board config (see falcon_hwdefs.h)
- * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
- * 0x8000-end boot code (mapped to PCI expansion ROM)
- * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
- * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
- * 0-0x400 chip and board config
- * configurable VPD
- * 0x800-0x1800 boot config
- * Aside from the chip and board config, all of these are optional and may
- * be absent or truncated depending on the devices used.
- */
-#define FALCON_NVCONFIG_END 0x400U
-#define FALCON_FLASH_BOOTCODE_START 0x8000U
-#define EFX_EEPROM_BOOTCONFIG_START 0x800U
-#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
-
-#endif /* EFX_SPI_H */
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index d37cb5017129..2c90e6b31575 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 5e090e54298e..2ac91c5b5eea 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
- if (unlikely(buffer->len == 0)) {
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
@@ -437,6 +439,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.
@@ -543,10 +548,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->initialised = true;
}
-void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
if (!tx_queue->buffer)
return;
@@ -561,22 +569,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
netdev_tx_reset_queue(tx_queue->core_txq);
}
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- if (!tx_queue->initialised)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- tx_queue->initialised = false;
-
- /* Flush TX queue, remove descriptor ring */
- efx_nic_fini_tx(tx_queue);
-
- efx_release_tx_buffers(tx_queue);
-}
-
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
int i;
@@ -708,7 +700,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
if (unlikely(!page_buf->addr) &&
- efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+ efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
return NULL;
result = (u8 *)page_buf->addr + offset;
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 29bb3f9941c0..3d5ee3259885 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index 225557caaf5a..ae044f44936a 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index e4dd3a7f304b..2310b75d4ec2 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,27 +15,15 @@
* Bug numbers are from Solarflare's Bugzilla.
*/
-#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1
-/* XAUI resets if link not detected */
-#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
-/* RX PCIe double split performance issue */
-#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
- * or a PCIe error (bug 11028) */
-#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
-/* Transmit flow control may get disabled */
-#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
-/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
@@ -56,4 +44,10 @@
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
+#define EFX_WORKAROUND_35388(efx) \
+ (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
#endif /* EFX_WORKAROUNDS_H */