author     Linus Torvalds <torvalds@linux-foundation.org>   2015-06-25 01:49:49 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-06-25 01:49:49 +0200
commit     e0456717e483bb8a9431b80a5bdc99a928b9b003 (patch)
tree       5eb5add2bafd1f20326d70f5cb3b711d00a40b10 /drivers/net/ethernet/sfc
parent     Merge branch 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux... (diff)
parent     bridge: vlan: flush the dynamically learned entries on port vlan delete (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Add TX fast path in mac80211, from Johannes Berg.
 2) Add TSO/GRO support to ibmveth, from Thomas Falcon
 3) Move away from cached routes in ipv6, just like ipv4, from Martin KaFai Lau.
 4) Lots of new rhashtable tests, from Thomas Graf.
 5) Run ingress qdisc lockless, from Alexei Starovoitov.
 6) Allow servers to fetch TCP packet headers for SYN packets of new connections, for fingerprinting. From Eric Dumazet.
 7) Add mode parameter to pktgen, for testing receive. From Alexei Starovoitov.
 8) Cache access optimizations via simplifications of build_skb(), from Alexander Duyck.
 9) Move page frag allocator under mm/, also from Alexander.
10) Add xmit_more support to hv_netvsc, from KY Srinivasan.
11) Add a counter guard in case we try to perform endless reclassify loops in the packet scheduler.
12) Extend flow dissector to be programmable and use it in new "Flower" classifier. From Jiri Pirko.
13) AF_PACKET fanout rollover fixes, performance improvements, and new statistics. From Willem de Bruijn.
14) Add netdev driver for GENEVE tunnels, from John W Linville.
15) Add ingress netfilter hooks and filtering, from Pablo Neira Ayuso.
16) Fix handling of epoll edge triggers in TCP, from Eric Dumazet.
17) Add an ECN retry fallback for the initial TCP handshake, from Daniel Borkmann.
18) Add tail call support to BPF, from Alexei Starovoitov.
19) Add several pktgen helper scripts, from Jesper Dangaard Brouer.
20) Add zerocopy support to AF_UNIX, from Hannes Frederic Sowa.
21) Favor even port numbers for allocation to connect() requests, and odd port numbers for bind(0), in an effort to help avoid ip_local_port_range exhaustion. From Eric Dumazet.
22) Add Cavium ThunderX driver, from Sunil Goutham.
23) Allow bpf programs to access skb_iif and dev->ifindex SKB metadata, from Alexei Starovoitov.
24) Add support for T6 chips in cxgb4vf driver, from Hariprasad Shenai.
25) Double TCP Small Queues default to 256K to accommodate situations like the XEN driver and wireless aggregation. From Wei Liu.
26) Add more entropy inputs to flow dissector, from Tom Herbert.
27) Add CDG congestion control algorithm to TCP, from Kenneth Klette Jonassen.
28) Convert ipset over to RCU locking, from Jozsef Kadlecsik.
29) Track and act upon link status of ipv4 route nexthops, from Andy Gospodarek.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1670 commits)
  bridge: vlan: flush the dynamically learned entries on port vlan delete
  bridge: multicast: add a comment to br_port_state_selection about blocking state
  net: inet_diag: export IPV6_V6ONLY sockopt
  stmmac: troubleshoot unexpected bits in des0 & des1
  net: ipv4 sysctl option to ignore routes when nexthop link is down
  net: track link-status of ipv4 nexthops
  net: switchdev: ignore unsupported bridge flags
  net: Cavium: Fix MAC address setting in shutdown state
  drivers: net: xgene: fix for ACPI support without ACPI
  ip: report the original address of ICMP messages
  net/mlx5e: Prefetch skb data on RX
  net/mlx5e: Pop cq outside mlx5e_get_cqe
  net/mlx5e: Remove mlx5e_cq.sqrq back-pointer
  net/mlx5e: Remove extra spaces
  net/mlx5e: Avoid TX CQE generation if more xmit packets expected
  net/mlx5e: Avoid redundant dev_kfree_skb() upon NOP completion
  net/mlx5e: Remove re-assignment of wq type in mlx5e_enable_rq()
  net/mlx5e: Use skb_shinfo(skb)->gso_segs rather than counting them
  net/mlx5e: Static mapping of netdev priv resources to/from netdev TX queues
  net/mlx4_en: Use HW counters for rx/tx bytes/packets in PF device
  ...
Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig        |    9
-rw-r--r--  drivers/net/ethernet/sfc/Makefile       |    2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c         | 1147
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c   |  783
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.h   |   69
-rw-r--r--  drivers/net/ethernet/sfc/efx.c          |  317
-rw-r--r--  drivers/net/ethernet/sfc/efx.h          |   15
-rw-r--r--  drivers/net/ethernet/sfc/enum.h         |    2
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c      |    7
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c       |   33
-rw-r--r--  drivers/net/ethernet/sfc/farch.c        |   64
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c         |  228
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h         |   16
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h    |  434
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c    |   13
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h   |   34
-rw-r--r--  drivers/net/ethernet/sfc/nic.h          |  251
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c          |   40
-rw-r--r--  drivers/net/ethernet/sfc/siena.c        |   27
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c  |  156
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.h  |   79
-rw-r--r--  drivers/net/ethernet/sfc/sriov.c        |   83
-rw-r--r--  drivers/net/ethernet/sfc/sriov.h        |   31
23 files changed, 3187 insertions(+), 653 deletions(-)
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 088921294448..4dd92b7b80f4 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -36,3 +36,12 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
+config SFC_MCDI_LOGGING
+ bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+ depends on SFC
+ default y
+ ---help---
+ This enables support for tracing of MCDI (Management-Controller-to-
+ Driver-Interface) commands and responses, allowing debugging of
+ driver/firmware interaction. The tracing is actually enabled by
+ a sysfs file 'mcdi_logging' under the PCI device.
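
Illustration only (not part of the patch): the help text above says MCDI tracing is switched on through a per-device sysfs attribute named 'mcdi_logging'. A userspace toggle could look roughly like the sketch below; the PCI address 0000:03:00.0 and the exact sysfs path are placeholders, only the attribute name comes from the help text.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder PCI address; 'mcdi_logging' is the attribute named above. */
	const char *attr = "/sys/bus/pci/devices/0000:03:00.0/mcdi_logging";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open mcdi_logging");
		return 1;
	}
	if (write(fd, "1", 1) != 1) {	/* "1" enables tracing; "0" presumably disables it */
		perror("write mcdi_logging");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
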
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3a83c0dca8e6..ce8470fe79d5 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -3,6 +3,6 @@ sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fbb6cfa0f5f1..847643455468 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -15,6 +15,7 @@
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
+#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -30,6 +31,9 @@ enum {
/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
/* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
@@ -77,7 +81,6 @@ struct efx_ef10_filter_table {
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -92,8 +95,49 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
- return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+ int bar;
+
+ bar = efx->type->mem_bar;
+ return resource_size(&efx->pci_dev->resource[bar]);
+}
+
+static int efx_ef10_get_pf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+ return 0;
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_get_vf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
+ return 0;
}
+#endif
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
@@ -117,6 +161,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ /* record the DPCPU firmware IDs to determine VEB vswitching support.
+ */
+ nic_data->rx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+ nic_data->tx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
if (!(nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
netif_err(efx, drv, efx->net_dev,
@@ -147,7 +198,7 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
return rc > 0 ? rc : -ERANGE;
}
-static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
size_t outlen;
@@ -167,9 +218,66 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
+static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+ size_t outlen;
+ int num_addrs, rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+ return -EIO;
+
+ num_addrs = MCDI_DWORD(outbuf,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+
+ WARN_ON(num_addrs != 1);
+
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
+
+ return 0;
+}
+
+static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ ? 1 : 0);
+}
+
+static ssize_t efx_ef10_show_primary_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ ? 1 : 0);
+}
+
+static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
+ NULL);
+static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
+
static int efx_ef10_probe(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data;
+ struct net_device *net_dev = efx->net_dev;
int i, rc;
/* We can have one VI for each 8K region. However, until we
@@ -178,7 +286,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->max_channels =
min_t(unsigned int,
EFX_MAX_CHANNELS,
- resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ efx_ef10_mem_map_size(efx) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
if (WARN_ON(efx->max_channels == 0))
return -EIO;
@@ -188,6 +296,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
if (rc)
@@ -209,6 +320,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
* function. We send a special message using the least
@@ -230,45 +343,85 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_link_control_flag);
+ if (rc)
+ goto fail3;
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ if (rc)
+ goto fail4;
+
+ rc = efx_ef10_get_pf_index(efx);
+ if (rc)
+ goto fail5;
+
rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->port_num = rc;
+ net_dev->dev_port = rc;
- rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
if (rc)
- goto fail3;
+ goto fail5;
rc = efx_ef10_get_sysclk_freq(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
- /* Check whether firmware supports bug 35388 workaround */
+ /* Check whether firmware supports bug 35388 workaround.
+ * First try to enable it, then if we get EPERM, just
+ * ask if it's already enabled
+ */
rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
- if (rc == 0)
+ if (rc == 0) {
nic_data->workaround_35388 = true;
- else if (rc != -ENOSYS && rc != -ENOENT)
- goto fail3;
+ } else if (rc == -EPERM) {
+ unsigned int enabled;
+
+ rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
+ if (rc)
+ goto fail3;
+ nic_data->workaround_35388 = enabled &
+ MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
+ } else if (rc != -ENOSYS && rc != -ENOENT) {
+ goto fail5;
+ }
netif_dbg(efx, probe, efx->net_dev,
"workaround for bug 35388 is %sabled\n",
nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
- if (rc)
- goto fail3;
+ if (rc && rc != -EPERM)
+ goto fail5;
efx_ptp_probe(efx, NULL);
+#ifdef CONFIG_SFC_SRIOV
+ if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+ } else
+#endif
+ ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+
return 0;
+fail5:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+fail4:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
efx_mcdi_fini(efx);
fail2:
@@ -281,7 +434,7 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
size_t outlen;
int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
@@ -352,9 +505,9 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- MCDI_DECLARE_BUF(inbuf,
- max(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ _MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned int offset, index;
@@ -363,6 +516,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+ memset(inbuf, 0, sizeof(inbuf));
+
/* Link a buffer to each VI in the write-combining mapping */
for (index = 0; index < nic_data->n_piobufs; ++index) {
MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
@@ -475,6 +630,25 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_ef10_nic_data *nic_data_pf;
+ struct pci_dev *pci_dev_pf;
+ struct efx_nic *efx_pf;
+ struct ef10_vf *vf;
+
+ if (efx->pci_dev->is_virtfn) {
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ efx_pf = pci_get_drvdata(pci_dev_pf);
+ nic_data_pf = efx_pf->nic_data;
+ vf = nic_data_pf->vf + nic_data->vf_index;
+ vf->efx = NULL;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+#endif
+
efx_ptp_remove(efx);
efx_mcdi_mon_remove(efx);
@@ -490,11 +664,78 @@ static void efx_ef10_remove(struct efx_nic *efx)
if (!nic_data->must_restore_piobufs)
efx_ef10_free_piobufs(efx);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
}
+static int efx_ef10_probe_pf(struct efx_nic *efx)
+{
+ return efx_ef10_probe(efx);
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_probe_vf(struct efx_nic *efx)
+{
+ int rc;
+ struct pci_dev *pci_dev_pf;
+
+ /* If the parent PF has no VF data structure, it doesn't know about this
+ * VF so fail probe. The VF needs to be re-created. This can happen
+ * if the PF driver is unloaded while the VF is assigned to a guest.
+ */
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
+
+ if (!nic_data_pf->vf) {
+ netif_info(efx, drv, efx->net_dev,
+ "The VF cannot link to its parent PF; "
+ "please destroy and re-create the VF\n");
+ return -EBUSY;
+ }
+ }
+
+ rc = efx_ef10_probe(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_get_vf_index(efx);
+ if (rc)
+ goto fail;
+
+ if (efx->pci_dev->is_virtfn) {
+ if (efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf =
+ pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ nic_data_p->vf[nic_data->vf_index].efx = efx;
+ nic_data_p->vf[nic_data->vf_index].pci_dev =
+ efx->pci_dev;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+
+ return 0;
+
+fail:
+ efx_ef10_remove(efx);
+ return rc;
+}
+#else
+static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
+{
+ return 0;
+}
+#endif
+
static int efx_ef10_alloc_vis(struct efx_nic *efx,
unsigned int min_vis, unsigned int max_vis)
{
@@ -687,7 +928,9 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_rss_config(efx);
+ /* don't fail init if RSS setup doesn't work */
+ efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+
return 0;
}
@@ -702,6 +945,14 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
+static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
+{
+ if (reason == RESET_TYPE_MC_FAILURE)
+ return RESET_TYPE_DATAPATH;
+
+ return efx_mcdi_map_reset_reason(reason);
+}
+
static int efx_ef10_map_reset_flags(u32 *flags)
{
enum {
@@ -760,93 +1011,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
- EF10_DMA_STAT(tx_bytes, TX_BYTES),
- EF10_DMA_STAT(tx_packets, TX_PKTS),
- EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
- EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
- EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
- EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
- EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
- EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
- EF10_DMA_STAT(tx_64, TX_64_PKTS),
- EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
- EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
- EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
- EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
- EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_bytes, RX_BYTES),
- EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
- EF10_OTHER_STAT(rx_good_bytes),
- EF10_OTHER_STAT(rx_bad_bytes),
- EF10_DMA_STAT(rx_packets, RX_PKTS),
- EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
- EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
- EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
- EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
- EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
- EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
- EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
- EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
- EF10_DMA_STAT(rx_64, RX_64_PKTS),
- EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
- EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
- EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
- EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
- EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
- EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
- EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
- EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
- EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
- EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(port_tx_packets, TX_PKTS),
+ EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(port_rx_good_bytes),
+ EF10_OTHER_STAT(port_rx_bad_bytes),
+ EF10_DMA_STAT(port_rx_packets, RX_PKTS),
+ EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
GENERIC_SW_STAT(rx_nodesc_trunc),
GENERIC_SW_STAT(rx_noskb_drops),
- EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
- EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
- EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
- EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
- EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
- EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
+ EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
+ EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
+ EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
+ EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
+ EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
+ EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
+ EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
+ EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
+ EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
+ EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
+ EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
+ EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
+ EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
+ EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};
-#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
- (1ULL << EF10_STAT_tx_packets) | \
- (1ULL << EF10_STAT_tx_pause) | \
- (1ULL << EF10_STAT_tx_unicast) | \
- (1ULL << EF10_STAT_tx_multicast) | \
- (1ULL << EF10_STAT_tx_broadcast) | \
- (1ULL << EF10_STAT_rx_bytes) | \
- (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
- (1ULL << EF10_STAT_rx_good_bytes) | \
- (1ULL << EF10_STAT_rx_bad_bytes) | \
- (1ULL << EF10_STAT_rx_packets) | \
- (1ULL << EF10_STAT_rx_good) | \
- (1ULL << EF10_STAT_rx_bad) | \
- (1ULL << EF10_STAT_rx_pause) | \
- (1ULL << EF10_STAT_rx_control) | \
- (1ULL << EF10_STAT_rx_unicast) | \
- (1ULL << EF10_STAT_rx_multicast) | \
- (1ULL << EF10_STAT_rx_broadcast) | \
- (1ULL << EF10_STAT_rx_lt64) | \
- (1ULL << EF10_STAT_rx_64) | \
- (1ULL << EF10_STAT_rx_65_to_127) | \
- (1ULL << EF10_STAT_rx_128_to_255) | \
- (1ULL << EF10_STAT_rx_256_to_511) | \
- (1ULL << EF10_STAT_rx_512_to_1023) | \
- (1ULL << EF10_STAT_rx_1024_to_15xx) | \
- (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
- (1ULL << EF10_STAT_rx_gtjumbo) | \
- (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
- (1ULL << EF10_STAT_rx_overflow) | \
- (1ULL << EF10_STAT_rx_nodesc_drops) | \
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
+ (1ULL << EF10_STAT_port_tx_packets) | \
+ (1ULL << EF10_STAT_port_tx_pause) | \
+ (1ULL << EF10_STAT_port_tx_unicast) | \
+ (1ULL << EF10_STAT_port_tx_multicast) | \
+ (1ULL << EF10_STAT_port_tx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_bytes) | \
+ (1ULL << \
+ EF10_STAT_port_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_port_rx_packets) | \
+ (1ULL << EF10_STAT_port_rx_good) | \
+ (1ULL << EF10_STAT_port_rx_bad) | \
+ (1ULL << EF10_STAT_port_rx_pause) | \
+ (1ULL << EF10_STAT_port_rx_control) | \
+ (1ULL << EF10_STAT_port_rx_unicast) | \
+ (1ULL << EF10_STAT_port_rx_multicast) | \
+ (1ULL << EF10_STAT_port_rx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_lt64) | \
+ (1ULL << EF10_STAT_port_rx_64) | \
+ (1ULL << EF10_STAT_port_rx_65_to_127) | \
+ (1ULL << EF10_STAT_port_rx_128_to_255) | \
+ (1ULL << EF10_STAT_port_rx_256_to_511) | \
+ (1ULL << EF10_STAT_port_rx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
+ (1ULL << EF10_STAT_port_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
+ (1ULL << EF10_STAT_port_rx_overflow) | \
+ (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
(1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
(1ULL << GENERIC_STAT_rx_noskb_drops))
@@ -854,39 +1124,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
* switchable port we do not expose these because they might not
* include all the packets they should.
*/
-#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
- (1ULL << EF10_STAT_tx_lt64) | \
- (1ULL << EF10_STAT_tx_64) | \
- (1ULL << EF10_STAT_tx_65_to_127) | \
- (1ULL << EF10_STAT_tx_128_to_255) | \
- (1ULL << EF10_STAT_tx_256_to_511) | \
- (1ULL << EF10_STAT_tx_512_to_1023) | \
- (1ULL << EF10_STAT_tx_1024_to_15xx) | \
- (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
+ (1ULL << EF10_STAT_port_tx_lt64) | \
+ (1ULL << EF10_STAT_port_tx_64) | \
+ (1ULL << EF10_STAT_port_tx_65_to_127) |\
+ (1ULL << EF10_STAT_port_tx_128_to_255) |\
+ (1ULL << EF10_STAT_port_tx_256_to_511) |\
+ (1ULL << EF10_STAT_port_tx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC. For a 10G/40G
* switchable port we do expose these because the errors will otherwise
* be silent.
*/
-#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
- (1ULL << EF10_STAT_rx_length_error))
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
+ (1ULL << EF10_STAT_port_rx_length_error))
/* These statistics are only provided if the firmware supports the
* capability PM_AND_RXDP_COUNTERS.
*/
#define HUNT_PM_AND_RXDP_STAT_MASK ( \
- (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
- (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
- (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
- (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
- (1ULL << EF10_STAT_rx_dp_hlb_wait))
+ (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -894,6 +1164,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
u32 port_caps = efx_mcdi_phy_get_caps(efx);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ if (!(efx->mcdi->fn_flags &
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ return 0;
+
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
@@ -908,13 +1182,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
- u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 raw_mask[2];
+
+ raw_mask[0] = efx_ef10_raw_stat_mask(efx);
+
+ /* Only show vadaptor stats when EVB capability is present */
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
+ raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
+ raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ } else {
+ raw_mask[1] = 0;
+ }
#if BITS_PER_LONG == 64
- mask[0] = raw_mask;
+ mask[0] = raw_mask[0];
+ mask[1] = raw_mask[1];
#else
- mask[0] = raw_mask & 0xffffffff;
- mask[1] = raw_mask >> 32;
+ mask[0] = raw_mask[0] & 0xffffffff;
+ mask[1] = raw_mask[0] >> 32;
+ mask[2] = raw_mask[1] & 0xffffffff;
+ mask[3] = raw_mask[1] >> 32;
#endif
}
@@ -927,7 +1216,51 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
mask, names);
}
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
+ stats[EF10_STAT_rx_multicast] +
+ stats[EF10_STAT_rx_broadcast];
+ core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
+ stats[EF10_STAT_tx_multicast] +
+ stats[EF10_STAT_tx_broadcast];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
+ stats[EF10_STAT_rx_multicast_bytes] +
+ stats[EF10_STAT_rx_broadcast_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
+ stats[EF10_STAT_tx_multicast_bytes] +
+ stats[EF10_STAT_tx_broadcast_bytes];
+ core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = core_stats->rx_crc_errors;
+ core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ }
+
+ return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -952,67 +1285,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
- efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
- stats[EF10_STAT_rx_good_bytes] =
- stats[EF10_STAT_rx_bytes] -
- stats[EF10_STAT_rx_bytes_minus_good_bytes];
- efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
- stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[EF10_STAT_port_rx_nodesc_drops]);
+ stats[EF10_STAT_port_rx_good_bytes] =
+ stats[EF10_STAT_port_rx_bytes] -
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
efx_update_sw_stats(efx, stats);
return 0;
}
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
- struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
- DECLARE_BITMAP(mask, EF10_STAT_COUNT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u64 *stats = nic_data->stats;
- size_t stats_count = 0, index;
int retry;
- efx_ef10_get_stat_mask(efx, mask);
-
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
for (retry = 0; retry < 100; ++retry) {
- if (efx_ef10_try_update_nic_stats(efx) == 0)
+ if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
break;
udelay(100);
}
- if (full_stats) {
- for_each_set_bit(index, mask, EF10_STAT_COUNT) {
- if (efx_ef10_stat_desc[index].name) {
- *full_stats++ = stats[index];
- ++stats_count;
- }
- }
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ struct efx_buffer stats_buf;
+ __le64 *dma_stats;
+ int rc;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ if (in_interrupt()) {
+ /* If in atomic context, cannot update stats. Just update the
+ * software stats and return so the caller can continue.
+ */
+ spin_lock_bh(&efx->stats_lock);
+ efx_update_sw_stats(efx, stats);
+ return 0;
}
- if (core_stats) {
- core_stats->rx_packets = stats[EF10_STAT_rx_packets];
- core_stats->tx_packets = stats[EF10_STAT_tx_packets];
- core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
- core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
- core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
- stats[GENERIC_STAT_rx_nodesc_trunc] +
- stats[GENERIC_STAT_rx_noskb_drops];
- core_stats->multicast = stats[EF10_STAT_rx_multicast];
- core_stats->rx_length_errors =
- stats[EF10_STAT_rx_gtjumbo] +
- stats[EF10_STAT_rx_length_error];
- core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
- core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
- core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
- core_stats->rx_errors = (core_stats->rx_length_errors +
- core_stats->rx_crc_errors +
- core_stats->rx_frame_errors);
+ efx_ef10_get_stat_mask(efx, mask);
+
+ rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+ if (rc) {
+ spin_lock_bh(&efx->stats_lock);
+ return rc;
}
- return stats_count;
+ dma_stats = stats_buf.addr;
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+ MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 1);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->stats_lock);
+ if (rc) {
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc != -ENOENT || atomic_read(&efx->active_queues))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+ sizeof(inbuf), NULL, 0, rc);
+ goto out;
+ }
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+ stats, stats_buf.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ efx_update_sw_stats(efx, stats);
+out:
+ efx_nic_free_buffer(efx, &stats_buf);
+ return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx_ef10_try_update_nic_stats_vf(efx))
+ return 0;
+
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
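
Illustration only (not part of the patch): efx_ef10_try_update_nic_stats_vf() above takes a consistent snapshot of the DMA'd statistics with a generation-count check. The producer bumps a start marker, writes the counters, then writes an end marker; the reader loads the end marker, copies the block, re-reads the start marker and retries on a mismatch (the driver returns -EAGAIN). A minimal self-contained sketch of the reader side follows, with an invented struct layout standing in for the DMA buffer and C11 atomics standing in for the rmb() barriers.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define N_COUNTERS 16	/* illustrative; the real buffer holds MC_CMD_MAC_NSTATS qwords */

struct stats_dma_block {
	_Atomic uint64_t generation_end;	/* written last by the producer */
	uint64_t counters[N_COUNTERS];
	_Atomic uint64_t generation_start;	/* written first by the producer */
};

/* Returns 0 if 'out' holds a consistent snapshot, -1 if the producer raced
 * with the copy and the caller should retry.
 */
static int stats_read_snapshot(const struct stats_dma_block *blk,
			       uint64_t out[N_COUNTERS])
{
	/* Acquire load: the counter reads below cannot be hoisted above it
	 * (the first rmb() in the driver).
	 */
	uint64_t end = atomic_load_explicit(&blk->generation_end,
					    memory_order_acquire);

	memcpy(out, blk->counters, sizeof(blk->counters));

	/* Acquire fence: the counter reads above complete before the
	 * generation_start read below (the second rmb() in the driver).
	 */
	atomic_thread_fence(memory_order_acquire);

	return end == atomic_load_explicit(&blk->generation_start,
					   memory_order_relaxed) ? 0 : -1;
}

The PF path (efx_ef10_update_stats_pf() above) retries this check for up to roughly 10 ms (100 iterations of udelay(100)); the VF path simply reports no update when the check fails.
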
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
@@ -1044,6 +1424,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
}
}
+static void efx_ef10_get_wol_vf(struct efx_nic *efx,
+ struct ethtool_wolinfo *wol) {}
+
+static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
+{
+ return -EOPNOTSUPP;
+}
+
static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
@@ -1123,13 +1511,17 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
efx_ef10_reset_mc_allocations(efx);
+ /* Driver-created vswitches and vports must be re-created */
+ nic_data->must_probe_vswitching = true;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
/* MAC statistics have been cleared on the NIC; clear the local
* statistic that we update with efx_update_diff_stat().
*/
- nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+ nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
return -EIO;
}
@@ -1232,16 +1624,17 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
efx_qword_t *txd;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
@@ -1251,7 +1644,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -1266,7 +1659,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
goto fail;
@@ -1299,7 +1692,7 @@ fail:
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = tx_queue->efx;
size_t outlen;
int rc;
@@ -1378,19 +1771,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
+ bool exclusive, unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
+ u32 alloc_type = exclusive ?
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ unsigned rss_spread = exclusive ?
+ efx->rss_spread :
+ min(rounddown_pow_of_two(efx->rss_spread),
+ EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+ if (!exclusive && rss_spread == 1) {
+ *context = EFX_EF10_RSS_CONTEXT_INVALID;
+ if (context_size)
+ *context_size = 1;
+ return 0;
+ }
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- EVB_PORT_ID_ASSIGNED);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
- EFX_MAX_CHANNELS);
+ nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -1402,6 +1809,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (context_size)
+ *context_size = rss_spread;
+
return 0;
}
@@ -1418,7 +1828,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
WARN_ON(rc != 0);
}
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
+ const u32 *rx_indir_table)
{
MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -1432,7 +1843,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
MCDI_PTR(tablebuf,
RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
- (u8) efx->rx_indir_table[i];
+ (u8) rx_indir_table[i];
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
sizeof(tablebuf), NULL, 0, NULL);
@@ -1460,27 +1871,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
+static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
+ unsigned *context_size)
{
+ u32 new_rx_rss_context;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc;
+ int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ false, context_size);
- netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
+ if (rc != 0)
+ return rc;
- if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
- if (rc != 0)
- goto fail;
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = false;
+ efx_set_default_rx_indir_table(efx);
+ return 0;
+}
+
+static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
+ const u32 *rx_indir_table)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+ u32 new_rx_rss_context;
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
+ !nic_data->rx_rss_context_exclusive) {
+ rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ true, NULL);
+ if (rc == -EOPNOTSUPP)
+ return rc;
+ else if (rc != 0)
+ goto fail1;
+ } else {
+ new_rx_rss_context = nic_data->rx_rss_context;
}
- rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
+ rx_indir_table);
if (rc != 0)
- goto fail;
+ goto fail2;
- return;
+ if (nic_data->rx_rss_context != new_rx_rss_context)
+ efx_ef10_rx_free_indir_table(efx);
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = true;
+ if (rx_indir_table != efx->rx_indir_table)
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
+ return 0;
-fail:
+fail2:
+ if (new_rx_rss_context != nic_data->rx_rss_context)
+ efx_ef10_free_rss_context(efx, new_rx_rss_context);
+fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ int rc;
+
+ if (efx->rss_spread == 1)
+ return 0;
+
+ rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+
+ if (rc == -ENOBUFS && !user) {
+ unsigned context_size;
+ bool mismatch = false;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
+ i++)
+ mismatch = rx_indir_table[i] !=
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
+ if (rc == 0) {
+ if (context_size != efx->rss_spread)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one of"
+ " different size."
+ " Wanted %u, got %u.\n",
+ efx->rss_spread, context_size);
+ else if (mismatch)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one but"
+ " could not apply custom"
+ " indirection.\n");
+ else
+ netif_info(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one.\n");
+ }
+ }
+ return rc;
+}
+
+static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)))
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (user)
+ return -EOPNOTSUPP;
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ return 0;
+ return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
@@ -1496,14 +1999,15 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_DECLARE_BUF(inbuf,
MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
rx_queue->scatter_n = 0;
rx_queue->scatter_len = 0;
@@ -1517,7 +2021,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
@@ -1532,7 +2036,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
efx_rx_queue_index(rx_queue));
@@ -1541,7 +2045,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = rx_queue->efx;
size_t outlen;
int rc;
@@ -1703,7 +2207,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
static void efx_ef10_ev_fini(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = channel->efx;
size_t outlen;
int rc;
@@ -2286,11 +2790,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
match_fields);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
@@ -3055,6 +3560,9 @@ fail:
return rc;
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3064,9 +3572,14 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
bool failed = false;
int rc;
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
if (!nic_data->must_restore_filters)
return;
+ if (!table)
+ return;
+
spin_lock_bh(&efx->filter_lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
@@ -3102,6 +3615,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
nic_data->must_restore_filters = false;
}
+/* Caller must hold efx->filter_sem for write */
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3110,6 +3624,10 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
unsigned int filter_idx;
int rc;
+ efx->filter_state = NULL;
+ if (!table)
+ return;
+
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec)
@@ -3135,6 +3653,9 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3149,6 +3670,9 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
if (!efx_dev_registered(efx))
return;
+ if (!table)
+ return;
+
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
@@ -3280,6 +3804,78 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
WARN_ON(remove_failed);
}
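
Illustration only (not part of the patch): the comments added above spell out a reader/writer convention for efx->filter_sem. Restore and sync_rx_mode run with it held for read and must tolerate a NULL efx->filter_state, while table_remove runs with it held for write; efx_ef10_set_mac_address() below shows the write side with down_write()/up_write(). A rough userspace analogy, with a pthread rwlock standing in for the kernel rw_semaphore and all names invented:

#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;
static void *filter_state;	/* stands in for efx->filter_state */

/* Readers (restore, sync_rx_mode) take the lock shared and must tolerate a
 * NULL table, as the '!table' early returns added above do.
 */
static void filter_table_user(void)
{
	pthread_rwlock_rdlock(&filter_sem);
	if (filter_state) {
		/* ... walk or refresh the filter table ... */
	}
	pthread_rwlock_unlock(&filter_sem);
}

/* The remover holds the lock exclusively, so no reader can observe a
 * half-torn-down table.
 */
static void filter_table_remove(void)
{
	pthread_rwlock_wrlock(&filter_sem);
	filter_state = NULL;
	pthread_rwlock_unlock(&filter_sem);
}
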
+static int efx_ef10_set_mac_address(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ bool was_enabled = efx->port_enabled;
+ int rc;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+
+ ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
+ efx->net_dev->dev_addr);
+ MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ nic_data->vport_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+
+ efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (was_enabled)
+ efx_net_open(efx->net_dev);
+ netif_device_attach(efx->net_dev);
+
+#if !defined(CONFIG_SFC_SRIOV)
+ if (rc == -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable mac-spoofing"
+ " on this interface\n");
+#else
+ if (rc == -EPERM) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+ /* Switch to PF and change MAC address on vport */
+ if (efx->pci_dev->is_virtfn && pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ if (!efx_ef10_sriov_set_vf_mac(efx_pf,
+ nic_data->vf_index,
+ efx->net_dev->dev_addr))
+ return 0;
+ }
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable mac-spoofing"
+ " on this interface\n");
+ } else if (efx->pci_dev->is_virtfn) {
+ /* Successfully changed by VF (with MAC spoofing), so update the
+ * parent PF if possible.
+ */
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
+ unsigned int i;
+
+ for (i = 0; i < efx_pf->vf_count; ++i) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ if (vf->efx == efx) {
+ ether_addr_copy(vf->mac,
+ efx->net_dev->dev_addr);
+ return 0;
+ }
+ }
+ }
+ }
+#endif
+ return rc;
+}
+
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
efx_ef10_filter_sync_rx_mode(efx);
@@ -3287,6 +3883,13 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return 0;
+}
+
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
@@ -3494,6 +4097,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
+ u32 host_time) {}
+
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
bool temp)
{
@@ -3571,6 +4177,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
return 0;
}
+static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ return -EOPNOTSUPP;
+}
+
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
struct hwtstamp_config *init)
{
@@ -3607,14 +4219,118 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+ .is_vf = true,
+ .mem_bar = EFX_MEM_VF_BAR,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_vf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_vf,
+ .start_stats = efx_port_dummy_op_void,
+ .pull_stats = efx_port_dummy_op_void,
+ .stop_stats = efx_port_dummy_op_void,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol_vf,
+ .set_wol = efx_ef10_set_wol_vf,
+ .resume_wol = efx_port_dummy_op_void,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_port_dummy_op_int,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+ .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_vf,
+ .set_mac_address = efx_ef10_set_mac_address,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+};
+
const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = efx_ef10_mem_map_size,
- .probe = efx_ef10_probe,
+ .probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
.dimension_resources = efx_ef10_dimension_resources,
.init = efx_ef10_init_nic,
.fini = efx_port_dummy_op_void,
- .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_reason = efx_ef10_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
.reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
@@ -3623,7 +4339,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.prepare_flr = efx_ef10_prepare_flr,
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
- .update_stats = efx_ef10_update_stats,
+ .update_stats = efx_ef10_update_stats_pf,
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
@@ -3650,7 +4366,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_rss_config = efx_ef10_rx_push_rss_config,
+ .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3689,11 +4405,24 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
.sriov_fini = efx_ef10_sriov_fini,
- .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
.sriov_wanted = efx_ef10_sriov_wanted,
.sriov_reset = efx_ef10_sriov_reset,
+ .sriov_flr = efx_ef10_sriov_flr,
+ .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
+ .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
+ .vswitching_probe = efx_ef10_vswitching_probe_pf,
+ .vswitching_restore = efx_ef10_vswitching_restore_pf,
+ .vswitching_remove = efx_ef10_vswitching_remove_pf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
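
For orientation, the VF variant of the nic_type above is bound via the new SFC9120 VF PCI ID (0x1903) added to efx_pci_table later in this patch. Below is a minimal sketch of how the per-function type is assumed to be recovered from driver_data at probe time; the helper name efx_nic_type_from_id is hypothetical and for illustration only, and the real lookup is assumed to happen during efx_pci_probe().

/* Hypothetical helper, illustration only: simply trusts the driver_data
 * convention used by the efx_pci_table entries added in this patch.
 */
static const struct efx_nic_type *
efx_nic_type_from_id(const struct pci_device_id *id)
{
	/* &efx_hunt_a0_vf_nic_type for device 0x1903 (SFC9120 VF),
	 * &efx_hunt_a0_nic_type for 0x0903/0x0923 (PF devices).
	 */
	return (const struct efx_nic_type *)id->driver_data;
}
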
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
new file mode 100644
index 000000000000..6c9b6e45509a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -0,0 +1,783 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "ef10_sriov.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_pcol.h"
+
+static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vf_fn)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+ MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+ EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index,
+ EVB_PORT_ASSIGN_IN_VF, vf_fn);
+
+ return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vswitch_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2);
+ MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
+ VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0);
+
+ /* Quietly try to allocate 2 VLAN tags */
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ /* If 2 VLAN tags are too many, revert to trying with 1 VLAN tag */
+ if (rc == -EPROTO) {
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC,
+ MC_CMD_VSWITCH_ALLOC_IN_LEN,
+ NULL, 0, rc);
+ }
+ return rc;
+}
+
+static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_alloc(struct efx_nic *efx,
+ unsigned int port_id_in,
+ unsigned int vport_type,
+ u16 vlan,
+ unsigned int *port_id_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(!port_id_out);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS,
+ (vlan != EFX_EF10_NO_VLAN));
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS,
+ VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0);
+ if (vlan != EFX_EF10_NO_VLAN)
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+ VPORT_ALLOC_IN_VLAN_TAG_0, vlan);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+ return 0;
+}
+
+static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int i;
+
+ if (!nic_data->vf)
+ return;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ /* If VF is assigned, do not free the vport */
+ if (vf->pci_dev &&
+ vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ continue;
+
+ if (vf->vport_assigned) {
+ efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ eth_zero_addr(vf->mac);
+ }
+
+ if (vf->vport_id) {
+ efx_ef10_vport_free(efx, vf->vport_id);
+ vf->vport_id = 0;
+ }
+
+ vf->efx = NULL;
+ }
+}
+
+static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+}
+
+static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx,
+ unsigned int vf_i)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf = nic_data->vf + vf_i;
+ int rc;
+
+ if (WARN_ON_ONCE(!nic_data->vf))
+ return -EOPNOTSUPP;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ return rc;
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ return rc;
+
+ vf->vport_assigned = 1;
+ return 0;
+}
+
+static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
+ return -ENOMEM;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ random_ether_addr(nic_data->vf[i].mac);
+ nic_data->vf[i].efx = NULL;
+ nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
+
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+ return rc;
+}
+
+static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx)
+{
+ unsigned int i;
+ int rc;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ return rc;
+}
+
+/* On top of the default firmware vswitch setup, create a VEB vswitch and
+ * expansion vport for use by this function.
+ */
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ int rc;
+
+ if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
+ /* vswitch not needed as we have no VFs */
+ efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ return 0;
+ }
+
+ rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+ if (rc)
+ goto fail1;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ if (rc)
+ goto fail2;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ if (rc)
+ goto fail3;
+ ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
+
+ rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc)
+ goto fail4;
+
+ return 0;
+fail4:
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ eth_zero_addr(nic_data->vport_mac);
+fail3:
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+fail2:
+ efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
+fail1:
+ return rc;
+}
+
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+}
+
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vswitching_probe_pf(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_ef10_sriov_restore_vf_vswitching(efx);
+ if (rc)
+ goto fail;
+
+ nic_data->must_probe_vswitching = false;
+fail:
+ return rc;
+}
+
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ return rc;
+
+ nic_data->must_probe_vswitching = false;
+ return 0;
+}
+
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+
+ efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+
+ if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ return; /* No vswitch was ever created */
+
+ if (!is_zero_ether_addr(nic_data->vport_mac)) {
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ nic_data->vport_mac);
+ eth_zero_addr(nic_data->vport_mac);
+ }
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
+ /* Only free the vswitch if no VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ efx_ef10_vswitch_free(efx, nic_data->vport_id);
+}
+
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
+{
+ efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+}
+
+static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+ int rc = 0;
+ struct pci_dev *dev = efx->pci_dev;
+
+ efx->vf_count = num_vfs;
+
+ rc = efx_ef10_sriov_alloc_vf_vswitching(efx);
+ if (rc)
+ goto fail1;
+
+ rc = pci_enable_sriov(dev, num_vfs);
+ if (rc)
+ goto fail2;
+
+ return 0;
+fail2:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+fail1:
+ efx->vf_count = 0;
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to enable SRIOV VFs\n");
+ return rc;
+}
+
+static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ unsigned int vfs_assigned = 0;
+
+ vfs_assigned = pci_vfs_assigned(dev);
+
+ if (vfs_assigned && !force) {
+ netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+ "please detach them before disabling SR-IOV\n");
+ return -EBUSY;
+ }
+
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ efx->vf_count = 0;
+ return 0;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ if (num_vfs == 0)
+ return efx_ef10_pci_sriov_disable(efx, false);
+ else
+ return efx_ef10_pci_sriov_enable(efx, num_vfs);
+}
+
+int efx_ef10_sriov_init(struct efx_nic *efx)
+{
+ return 0;
+}
+
+void efx_ef10_sriov_fini(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ if (!nic_data->vf) {
+ /* Remove any unassigned orphaned VFs */
+ if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
+ pci_disable_sriov(efx->pci_dev);
+ return;
+ }
+
+ /* Remove any VFs in the host */
+ for (i = 0; i < efx->vf_count; ++i) {
+ struct efx_nic *vf_efx = nic_data->vf[i].efx;
+
+ if (vf_efx)
+ vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+ }
+
+ rc = efx_ef10_pci_sriov_disable(efx, true);
+ if (rc)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Disabling SRIOV was not successful rc=%d\n", rc);
+ else
+ netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n");
+}
+
+static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
+ u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ return rc;
+}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ int rc;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ vf = nic_data->vf + vf_i;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc)
+ return rc;
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ return rc;
+ }
+
+ if (!is_zero_ether_addr(mac)) {
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ goto fail;
+ }
+ if (vf->efx)
+ ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+ }
+
+ ether_addr_copy(vf->mac, mac);
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ goto fail;
+
+ if (vf->efx) {
+ /* VF cannot use the vport_id that the PF created */
+ rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ vf->efx->type->filter_table_probe(vf->efx);
+ up_write(&vf->efx->filter_sem);
+ efx_net_open(vf->efx->net_dev);
+ netif_device_attach(vf->efx->net_dev);
+ }
+
+ return 0;
+
+fail:
+ eth_zero_addr(vf->mac);
+ return rc;
+}
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ u16 old_vlan, new_vlan;
+ int rc = 0, rc2 = 0;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ if (qos != 0)
+ return -EINVAL;
+
+ vf = nic_data->vf + vf_i;
+
+ new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan;
+ if (new_vlan == vf->vlan)
+ return 0;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ goto restore_filters;
+ }
+
+ if (vf->vport_assigned) {
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to change vlan on VF %d.\n", vf_i);
+ netif_warn(efx, drv, efx->net_dev,
+ "This is likely because the VF is bound to a driver in a VM.\n");
+ netif_warn(efx, drv, efx->net_dev,
+ "Please unload the driver in the VM.\n");
+ goto restore_vadaptor;
+ }
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ goto restore_evb_port;
+ }
+
+ if (vf->vport_id) {
+ rc = efx_ef10_vport_free(efx, vf->vport_id);
+ if (rc)
+ goto restore_mac;
+ vf->vport_id = 0;
+ }
+
+ /* Do the actual vlan change */
+ old_vlan = vf->vlan;
+ vf->vlan = new_vlan;
+
+ /* Restore everything in reverse order */
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ goto reset_nic;
+
+restore_mac:
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc2) {
+ eth_zero_addr(vf->mac);
+ goto reset_nic;
+ }
+ }
+
+restore_evb_port:
+ rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc2)
+ goto reset_nic;
+ else
+ vf->vport_assigned = 1;
+
+restore_vadaptor:
+ if (vf->efx) {
+ rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc2)
+ goto reset_nic;
+ }
+
+restore_filters:
+ if (vf->efx) {
+ rc2 = vf->efx->type->filter_table_probe(vf->efx);
+ if (rc2)
+ goto reset_nic;
+
+ up_write(&vf->efx->filter_sem);
+
+ rc2 = efx_net_open(vf->efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(vf->efx->net_dev);
+ }
+ return rc;
+
+reset_nic:
+ if (vf->efx) {
+ up_write(&vf->efx->filter_sem);
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore VF - scheduling reset.\n");
+ efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore the VF and cannot reset the VF "
+ "- VF is not functional.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Please reload the driver attached to the VF.\n");
+ }
+
+ return rc ? rc : rc2;
+}
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
+ bool spoofchk)
+{
+ return spoofchk ? -EOPNOTSUPP : 0;
+}
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state);
+ return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL); /* don't care what old mode was */
+}
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
+
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ size_t outlen;
+ int rc;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ vf = nic_data->vf + vf_i;
+
+ ivf->vf = vf_i;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = 0;
+ ether_addr_copy(ivf->mac, vf->mac);
+ ivf->vlan = (vf->vlan == EFX_EF10_NO_VLAN) ? 0 : vf->vlan;
+ ivf->qos = 0;
+
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
+ MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
+ return -EIO;
+ ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
+
+ return 0;
+}
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
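
The efx_ef10_sriov_set_vf_mac(), _set_vf_vlan(), _get_vf_config() and _set_vf_link_state() entry points above are reached from the ndo_set_vf_* netdev ops wired up in efx.c later in this patch. A minimal sketch of the expected dispatch path follows, assuming the generic efx_sriov_* wrappers (added elsewhere in this series, not shown here) forward through the per-type hooks; the function name below is illustrative only.

/* Illustrative sketch, not part of the patch: shows how a generic
 * ndo_set_vf_mac handler is assumed to reach the EF10 implementation
 * above via the efx_nic_type hook filled in for the PF.
 */
static int example_ndo_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (!efx->type->sriov_set_vf_mac)
		return -EOPNOTSUPP;

	/* For efx_hunt_a0_nic_type this resolves to
	 * efx_ef10_sriov_set_vf_mac(efx, vf_i, mac).
	 */
	return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
}
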
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
new file mode 100644
index 000000000000..db4ef537c610
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -0,0 +1,69 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF10_SRIOV_H
+#define EF10_SRIOV_H
+
+#include "net_driver.h"
+
+/**
+ * struct ef10_vf - PF's store of VF data
+ * @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
+ * @vport_id: vport ID for the VF
+ * @vport_assigned: record whether the vport is currently assigned to the VF
+ * @mac: MAC address for the VF, zero when address is removed from the vport
+ * @vlan: Default VLAN for the VF or #EFX_EF10_NO_VLAN
+ */
+struct ef10_vf {
+ struct efx_nic *efx;
+ struct pci_dev *pci_dev;
+ unsigned int vport_id;
+ unsigned int vport_assigned;
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+#define EFX_EF10_NO_VLAN 0
+};
+
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
+{
+ return false;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef10_sriov_init(struct efx_nic *efx);
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+void efx_ef10_sriov_fini(struct efx_nic *efx);
+static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
+ u16 vlan, u8 qos);
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf);
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state);
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+
+#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 65944dd8bf6b..804b9ad553d3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -26,6 +26,7 @@
#include "efx.h"
#include "nic.h"
#include "selftest.h"
+#include "sriov.h"
#include "mcdi.h"
#include "workarounds.h"
@@ -76,6 +77,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
[RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
@@ -948,6 +950,16 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+}
+
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
* through phy_op->set_settings(), and pushed asynchronously to the MAC
@@ -1001,7 +1013,7 @@ static void efx_mac_work(struct work_struct *data)
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1041,11 +1053,11 @@ static int efx_init_port(struct efx_nic *efx)
/* Reconfigure the MAC before creating dma queues (required for
* Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
- if (rc)
+ if (rc && rc != -EPERM)
goto fail2;
mutex_unlock(&efx->mac_lock);
@@ -1067,7 +1079,7 @@ static void efx_start_port(struct efx_nic *efx)
efx->port_enabled = true;
/* Ensure MAC ingress/egress is enabled */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1200,10 +1212,12 @@ static int efx_init_io(struct efx_nic *efx)
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc;
+ int rc, bar;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+ bar = efx->type->mem_bar;
+
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -1234,8 +1248,8 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
- rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
@@ -1258,7 +1272,7 @@ static int efx_init_io(struct efx_nic *efx)
return 0;
fail4:
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ pci_release_region(efx->pci_dev, bar);
fail3:
efx->membase_phys = 0;
fail2:
@@ -1269,6 +1283,8 @@ static int efx_init_io(struct efx_nic *efx)
static void efx_fini_io(struct efx_nic *efx)
{
+ int bar;
+
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
@@ -1277,11 +1293,23 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ bar = efx->type->mem_bar;
+ pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
- pci_disable_device(efx->pci_dev);
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
}
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
@@ -1314,15 +1342,19 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
}
+#endif
return count;
}
@@ -1426,10 +1458,15 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
-
- efx->rss_spread = ((efx->n_rx_channels > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- efx->n_rx_channels : efx_vf_size(efx));
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((efx->n_rx_channels > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ efx->n_rx_channels : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = efx->n_rx_channels;
return 0;
}
@@ -1593,7 +1630,6 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx)
{
- size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
@@ -1616,10 +1652,9 @@ static int efx_probe_nic(struct efx_nic *efx)
goto fail2;
if (efx->n_channels > 1)
- netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ netdev_rss_key_fill(&efx->rx_hash_key,
+ sizeof(efx->rx_hash_key));
+ efx_set_default_rx_indir_table(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -1650,10 +1685,11 @@ static int efx_probe_filters(struct efx_nic *efx)
int rc;
spin_lock_init(&efx->filter_lock);
-
+ init_rwsem(&efx->filter_sem);
+ down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
- return rc;
+ goto out_unlock;
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
@@ -1662,12 +1698,14 @@ static int efx_probe_filters(struct efx_nic *efx)
GFP_KERNEL);
if (!efx->rps_flow_id) {
efx->type->filter_table_remove(efx);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_unlock;
}
}
#endif
-
- return 0;
+out_unlock:
+ up_write(&efx->filter_sem);
+ return rc;
}
static void efx_remove_filters(struct efx_nic *efx)
@@ -1675,12 +1713,16 @@ static void efx_remove_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_flow_id);
#endif
+ down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
}
static void efx_restore_filters(struct efx_nic *efx)
{
+ down_read(&efx->filter_sem);
efx->type->filter_table_restore(efx);
+ up_read(&efx->filter_sem);
}
/**************************************************************************
@@ -1712,21 +1754,33 @@ static int efx_probe_all(struct efx_nic *efx)
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_probe(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to setup vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
- goto fail3;
+ goto fail4;
}
rc = efx_probe_channels(efx);
if (rc)
- goto fail4;
+ goto fail5;
return 0;
- fail4:
+ fail5:
efx_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
fail3:
efx_remove_port(efx);
fail2:
@@ -1816,6 +1870,9 @@ static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_channels(efx);
efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
efx_remove_port(efx);
efx_remove_nic(efx);
}
@@ -2059,7 +2116,7 @@ static int efx_busy_poll(struct napi_struct *napi)
*************************************************************************/
/* Context: process, rtnl_lock() held. */
-static int efx_net_open(struct net_device *net_dev)
+int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
@@ -2088,7 +2145,7 @@ static int efx_net_open(struct net_device *net_dev)
* Note that the kernel will ignore our return code; this method
* should really be a void.
*/
-static int efx_net_stop(struct net_device *net_dev)
+int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2146,7 +2203,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -2159,6 +2216,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
+ u8 old_addr[6];
+ int rc;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -2167,12 +2226,20 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
return -EADDRNOTAVAIL;
}
+ /* save old address */
+ ether_addr_copy(old_addr, net_dev->dev_addr);
ether_addr_copy(net_dev->dev_addr, new_addr);
- efx->type->sriov_mac_address_changed(efx);
+ if (efx->type->set_mac_address) {
+ rc = efx->type->set_mac_address(efx);
+ if (rc) {
+ ether_addr_copy(net_dev->dev_addr, old_addr);
+ return rc;
+ }
+ }
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
return 0;
@@ -2199,7 +2266,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_farch_netdev_ops = {
+static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2212,10 +2279,12 @@ static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
- .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
- .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
- .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
- .ndo_get_vf_config = efx_siena_sriov_get_vf_config,
+ .ndo_set_vf_mac = efx_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_sriov_get_vf_config,
+ .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
+ .ndo_get_phys_port_id = efx_sriov_get_phys_port_id,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
@@ -2229,29 +2298,6 @@ static const struct net_device_ops efx_farch_netdev_ops = {
#endif
};
-static const struct net_device_ops efx_ef10_netdev_ops = {
- .ndo_open = efx_net_open,
- .ndo_stop = efx_net_stop,
- .ndo_get_stats64 = efx_net_stats,
- .ndo_tx_timeout = efx_watchdog,
- .ndo_start_xmit = efx_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = efx_ioctl,
- .ndo_change_mtu = efx_change_mtu,
- .ndo_set_mac_address = efx_set_mac_address,
- .ndo_set_rx_mode = efx_set_rx_mode,
- .ndo_set_features = efx_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = efx_busy_poll,
-#endif
-#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = efx_filter_rfs,
-#endif
-};
-
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2264,8 +2310,7 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
- net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
+ if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2284,6 +2329,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+#endif
+
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2292,12 +2359,9 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
- } else {
- net_dev->netdev_ops = &efx_farch_netdev_ops;
- }
net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2344,9 +2408,21 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_attr_mcdi_logging;
+ }
+#endif
return 0;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+fail_attr_mcdi_logging:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2365,13 +2441,14 @@ static void efx_unregister_netdev(struct efx_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-
- rtnl_lock();
- unregister_netdevice(efx->net_dev);
- efx->state = STATE_UNINIT;
- rtnl_unlock();
+ if (efx_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+#endif
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
}
/**************************************************************************
@@ -2393,7 +2470,8 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
efx->phy_op->fini(efx);
efx->type->fini(efx);
}
@@ -2422,11 +2500,13 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (!ok)
goto fail;
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
- if (efx->phy_op->reconfigure(efx))
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
@@ -2434,8 +2514,20 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ down_read(&efx->filter_sem);
efx_restore_filters(efx);
- efx->type->sriov_reset(efx);
+ up_read(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
@@ -2605,6 +2697,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
case RESET_TYPE_MC_BIST:
case RESET_TYPE_MCDI_TIMEOUT:
method = type;
@@ -2655,6 +2748,8 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
@@ -2809,7 +2904,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
}
/* Final NIC shutdown
- * This is called only at module unload (or hotplug removal).
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
@@ -2824,9 +2920,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
+ efx->state = STATE_UNINIT;
rtnl_unlock();
- efx->type->sriov_fini(efx);
+ if (efx->type->sriov_fini)
+ efx->type->sriov_fini(efx);
+
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
@@ -3008,7 +3107,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_probe_vpd_strings(efx);
+ if (!efx->type->is_vf)
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -3023,10 +3123,12 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail4;
- rc = efx->type->sriov_init(efx);
- if (rc)
- netif_err(efx, probe, efx->net_dev,
- "SR-IOV can't be enabled rc %d\n", rc);
+ if (efx->type->sriov_init) {
+ rc = efx->type->sriov_init(efx);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "SR-IOV can't be enabled rc %d\n", rc);
+ }
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
@@ -3058,6 +3160,26 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return rc;
}
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ int rc;
+ struct efx_nic *efx = pci_get_drvdata(dev);
+
+ if (efx->type->sriov_configure) {
+ rc = efx->type->sriov_configure(efx, num_vfs);
+ if (rc)
+ return rc;
+ else
+ return num_vfs;
+ } else
+ return -EOPNOTSUPP;
+}
+#endif
+
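
A short note on the calling contract of the callback above: the PCI core invokes it when user space writes a VF count to the device's sriov_numvfs sysfs attribute (standard SR-IOV plumbing, not part of this patch), and writing 0 disables the VFs again. A hedged sketch of a caller honouring the documented return convention, assuming the callback registered in efx_pci_driver later in this file (CONFIG_SFC_SRIOV builds only); the function name is illustrative:

/* Illustrative only: roughly what the PCI core is assumed to do with
 * the sriov_configure callback on a sriov_numvfs write.
 */
static int example_sriov_numvfs_write(struct pci_dev *dev, int num_vfs)
{
	int rc = efx_pci_sriov_configure(dev, num_vfs);

	/* Success returns the number of VFs now enabled (num_vfs, or 0
	 * when disabling); anything negative is an errno from the driver.
	 */
	return rc < 0 ? rc : 0;
}
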
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
@@ -3280,6 +3402,9 @@ static struct pci_driver efx_pci_driver = {
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
.err_handler = &efx_err_handlers,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_pci_sriov_configure,
+#endif
};
/**************************************************************************
@@ -3302,9 +3427,11 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
+#ifdef CONFIG_SFC_SRIOV
rc = efx_init_sriov();
if (rc)
goto err_sriov;
+#endif
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
@@ -3321,8 +3448,10 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
err_sriov:
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -3334,7 +3463,9 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2587c582a821..acb1e0718485 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,7 +15,12 @@
#include "filter.h"
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All VFs use BAR 0/1 for memory */
#define EFX_MEM_BAR 2
+#define EFX_MEM_VF_BAR 0
+
+int efx_net_open(struct net_device *net_dev);
+int efx_net_stop(struct net_device *net_dev);
/* TX */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
@@ -32,6 +37,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
/* RX */
+void efx_set_default_rx_indir_table(struct efx_nic *efx);
void efx_rx_config_page_split(struct efx_nic *efx);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
@@ -71,6 +77,8 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
/* Filters */
+void efx_mac_reconfigure(struct efx_nic *efx);
+
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -220,6 +228,13 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+ return 1 << efx->vi_scale;
+}
+#endif
+
static inline void efx_schedule_channel(struct efx_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d1dbb5fb31bb..c94f56271dd4 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
* @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
@@ -159,6 +160,7 @@ enum reset_type {
RESET_TYPE_ALL,
RESET_TYPE_WORLD,
RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_DATAPATH,
RESET_TYPE_MC_BIST,
RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4835bc0d0de8..034797661f96 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -734,7 +734,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
/* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no
* harm doing this twice */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
out:
mutex_unlock(&efx->mac_lock);
@@ -1109,9 +1109,8 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
return -EOPNOTSUPP;
if (!indir)
return 0;
- memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx->type->rx_push_rss_config(efx);
- return 0;
+
+ return efx->type->rx_push_rss_config(efx, true, indir);
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index f166c8ef38a3..80e69af21642 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -477,16 +477,29 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*
**************************************************************************
*/
+static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ (void) efx;
+ (void) user;
+ (void) rx_indir_table;
+ return -ENOSYS;
+}
-static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
+ (void) user;
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+ return 0;
}
/**************************************************************************
@@ -2507,7 +2520,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- falcon_b0_rx_push_rss_config(efx);
+ falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2687,6 +2700,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2729,7 +2744,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_rss_config = efx_port_dummy_op_void,
+ .rx_push_rss_config = dummy_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2766,11 +2781,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2788,6 +2798,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2867,11 +2879,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index bb89e96a125e..f08266f0eca2 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -20,6 +20,8 @@
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
+#include "sriov.h"
+#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"
@@ -1198,13 +1200,17 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_tx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_rx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
@@ -1242,8 +1248,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1252,8 +1261,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
@@ -1317,9 +1329,11 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_farch_handle_driver_event(channel, &event);
break;
+#ifdef CONFIG_SFC_SRIOV
case FSE_CZ_EV_CODE_USER_EV:
efx_siena_sriov_event(channel, &event);
break;
+#endif
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
@@ -1685,28 +1699,32 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- nic_data->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free;
+ unsigned entries_per_vf, vf_limit;
+
+ nic_data->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries +
+ EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
}
- vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
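With the per-NIC-type SR-IOV hooks now optional, the code above only calls sriov_wanted() after checking that the pointer is populated. A stand-alone sketch of that guarded-callback pattern, with illustrative names and values (nic_type, dimension_resources, the VI counts):

	#include <stdbool.h>
	#include <stdio.h>

	struct nic_type {
		bool (*sriov_wanted)(void *nic);	/* may be NULL (e.g. Falcon) */
	};

	static bool siena_sriov_wanted(void *nic)
	{
		(void)nic;
		return true;
	}

	static void dimension_resources(const struct nic_type *type, void *nic)
	{
		unsigned vi_count = 32;			/* illustrative base value */

		/* call the hook only when this NIC type provides it */
		if (type->sriov_wanted && type->sriov_wanted(nic))
			vi_count += 8;			/* reserve VIs for the VFs */

		printf("vi_count=%u\n", vi_count);
	}

	int main(void)
	{
		const struct nic_type falcon = { .sriov_wanted = NULL };
		const struct nic_type siena  = { .sriov_wanted = siena_sriov_wanted };

		dimension_resources(&falcon, NULL);
		dimension_resources(&siena, NULL);
		return 0;
	}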
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d37928f01949..81640f8bb811 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -8,6 +8,7 @@
*/
#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
@@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+ "Enable MCDI logging on newly-probed functions");
+#endif
+
int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
bool already_attached;
- int rc;
+ int rc = -ENOMEM;
efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
if (!efx->mcdi)
- return -ENOMEM;
+ goto fail;
mcdi = efx_mcdi(efx);
mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ /* consuming code assumes buffer is page-sized */
+ mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!mcdi->logging_buffer)
+ goto fail1;
+ mcdi->logging_enabled = mcdi_logging_default;
+#endif
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
mcdi->state = MCDI_STATE_QUIESCENT;
@@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx)
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- return rc;
+ goto fail2;
/* Let the MC (and BMC, if this is a LOM) know that the driver
* is loaded. We should do this before we reset the NIC.
@@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx)
if (rc) {
netif_err(efx, probe, efx->net_dev,
"Unable to register driver with MCPU\n");
- return rc;
+ goto fail2;
}
if (already_attached)
/* Not a fatal error */
@@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx)
efx->primary = efx;
return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+ kfree(efx->mcdi);
+ efx->mcdi = NULL;
+fail:
+ return rc;
}
void efx_mcdi_fini(struct efx_nic *efx)
@@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx)
/* Relinquish the device (back to the BMC, if this is a LOM) */
efx_mcdi_drv_attach(efx, false, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
kfree(efx->mcdi);
}
@@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr[2];
size_t hdr_len;
u32 xflags, seqno;
@@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
hdr_len = 8;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ int bytes = 0;
+ int i;
+ /* Lengths should always be a whole number of dwords, so scream
+ * if they're not.
+ */
+ WARN_ON_ONCE(hdr_len % 4);
+ WARN_ON_ONCE(inlen % 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr[i].u32[0]));
+
+ for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(inbuf[i].u32[0]));
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+ }
+#endif
+
efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
mcdi->new_epoch = false;
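The request-logging block above renders each header and payload dword into the page-sized buffer as " %08x", stopping once the buffer is full. A stand-alone sketch of that bounded snprintf() accumulation loop; the buffer size, tag and sample header words here are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define LOG_BUF_SIZE 4096

	static void log_dwords(const uint32_t *words, size_t count, const char *tag)
	{
		char buf[LOG_BUF_SIZE];
		int bytes = 0;
		size_t i;

		buf[0] = '\0';
		/* append one dword per iteration, never writing past the buffer */
		for (i = 0; i < count && bytes < (int)sizeof(buf); i++)
			bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
					  " %08x", words[i]);

		printf("%s:%s\n", tag, buf);
	}

	int main(void)
	{
		uint32_t hdr[2] = { 0x00400002, 0x001c0000 };	/* made-up header */

		log_dwords(hdr, 2, "MCDI RPC REQ");
		return 0;
	}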
@@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr;
efx->type->mcdi_read_response(efx, &hdr, 0, 4);
@@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ size_t hdr_len, data_len;
+ int bytes = 0;
+ int i;
+
+ WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+ hdr_len = mcdi->resp_hdr_len / 4;
+ /* MCDI_DECLARE_BUF ensures that underlying buffer is padded
+ * to dword size, and the MCDI buffer is always dword size
+ */
+ data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr,
+ mcdi->resp_hdr_len + (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+ }
+#endif
+
if (error && mcdi->resp_data_len == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
@@ -406,7 +498,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
struct efx_mcdi_async_param *async;
size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (cmpxchg(&mcdi->state,
@@ -534,7 +626,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (mcdi->mode == MCDI_MODE_POLL)
@@ -1035,7 +1127,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
/* MAC stats are gathered lazily. We can ignore this. */
break;
case MCDI_EVENT_CODE_FLR:
- efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ if (efx->type->sriov_flr)
+ efx->type->sriov_flr(efx,
+ MCDI_EVENT_FIELD(*event, FLR_VF));
break;
case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
@@ -1081,9 +1175,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- MCDI_DECLARE_BUF(outbuf,
- max(MC_CMD_GET_VERSION_OUT_LEN,
- MC_CMD_GET_CAPABILITIES_OUT_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
size_t outlength;
const __le16 *ver_words;
size_t offset;
@@ -1108,19 +1200,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
* single version. Report which variants are running.
*/
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
- offset += snprintf(
- buf + offset, len - offset, " rx? tx?");
- else
- offset += snprintf(
- buf + offset, len - offset, " rx%x tx%x",
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
/* It's theoretically possible for the string to exceed 31
* characters, though in practice the first three version
@@ -1150,10 +1234,26 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
- rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+ * specified will fail with EPERM, and we have to tell the MC we don't
+ * care what firmware we get.
+ */
+ if (rc == -EPERM) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+ MC_CMD_FW_DONT_CARE);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ }
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
+ outbuf, outlen, rc);
goto fail;
+ }
if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
rc = -EIO;
goto fail;
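The DRV_ATTACH change above retries the attach with MC_CMD_FW_DONT_CARE when asking for a specific firmware variant is refused with EPERM (the non-primary-PF case). A stand-alone sketch of that try-specific-then-fall-back pattern; mc_drv_attach() here is a stub, not the MCDI call:

	#include <errno.h>
	#include <stdio.h>

	#define FW_LOW_LATENCY 0x1
	#define FW_DONT_CARE   0xffffffff	/* mirrors MC_CMD_FW_DONT_CARE */

	static int mc_drv_attach(unsigned fw_id, int is_primary)
	{
		/* pretend the MC only lets the primary PF pick a variant */
		if (fw_id != FW_DONT_CARE && !is_primary)
			return -EPERM;
		return 0;
	}

	static int attach(int is_primary)
	{
		int rc = mc_drv_attach(FW_LOW_LATENCY, is_primary);

		if (rc == -EPERM)	/* fall back to "don't care" */
			rc = mc_drv_attach(FW_DONT_CARE, is_primary);
		return rc;
	}

	int main(void)
	{
		printf("primary: %d, secondary: %d\n", attach(1), attach(0));
		return 0;
	}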
@@ -1178,16 +1278,6 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
- if (driver_operating &&
- (efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
- netif_err(efx, probe, efx->net_dev,
- "This driver version only supports one function per port\n");
- return -ENODEV;
- }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -1385,10 +1475,13 @@ fail1:
return rc;
}
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1406,6 +1499,8 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
outbuf, sizeof(outbuf), &outlen);
+ if (rc == -EPERM)
+ return 0;
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc) {
@@ -1443,24 +1538,31 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
index));
- return 0;
+ return 1;
}
-static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ int rc;
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
* reboot. We set a flag that makes the command a no-op if it
- * has already done so. We don't know what return code to
- * expect (0 or -EIO), so ignore it.
+ * has already done so.
+ * The MCDI will thus return either 0 or -EIO.
*/
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+ if (rc == -EIO)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, rc);
+ return rc;
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
@@ -1468,12 +1570,10 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
int rc;
rc = efx_mcdi_read_assertion(efx);
- if (rc)
+ if (rc <= 0)
return rc;
- efx_mcdi_exit_assertion(efx);
-
- return 0;
+ return efx_mcdi_exit_assertion(efx);
}
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
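efx_mcdi_read_assertion() now uses a tri-state return: negative on error, 0 when no assertion had fired, 1 when one was read, letting efx_mcdi_handle_assertion() bail out with a single "if (rc <= 0)". A stand-alone sketch of that convention with illustrative functions:

	#include <stdio.h>

	static int read_assertion(int fired)
	{
		if (fired < 0)
			return -5;	/* stand-in for an I/O error */
		return fired ? 1 : 0;	/* 1 = assertion read, 0 = nothing to do */
	}

	static int exit_assertion(void)
	{
		return 0;		/* reboot request succeeded */
	}

	static int handle_assertion(int fired)
	{
		int rc = read_assertion(fired);

		if (rc <= 0)		/* error, or no assertion to clear */
			return rc;
		return exit_assertion();
	}

	int main(void)
	{
		printf("%d %d %d\n", handle_assertion(-1), handle_assertion(0),
		       handle_assertion(1));
		return 0;
	}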
@@ -1550,7 +1650,9 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
if (rc)
return rc;
- if (method == RESET_TYPE_WORLD)
+ if (method == RESET_TYPE_DATAPATH)
+ return 0;
+ else if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
return efx_mcdi_reset_func(efx);
@@ -1688,6 +1790,36 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
NULL, 0, NULL);
}
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (impl_out)
+ *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+
+ if (enabled_out)
+ *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
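The new efx_mcdi_get_workarounds() hands back the "implemented" and "enabled" words, in which each workaround is a single bit (the BUG17230/BUG35388/BUG35017 values defined for MC_CMD_GET_WORKAROUNDS_OUT later in this patch). A stand-alone sketch of decoding those masks; the example values are made up:

	#include <stdio.h>

	#define WORKAROUND_BUG17230 0x2
	#define WORKAROUND_BUG35388 0x4
	#define WORKAROUND_BUG35017 0x8

	static void report(unsigned implemented, unsigned enabled, unsigned bit,
			   const char *name)
	{
		printf("%s: %simplemented, %senabled\n", name,
		       (implemented & bit) ? "" : "not ",
		       (enabled & bit) ? "" : "not ");
	}

	int main(void)
	{
		/* example values; a driver would read these from the MC response */
		unsigned implemented = WORKAROUND_BUG17230 | WORKAROUND_BUG35388;
		unsigned enabled = WORKAROUND_BUG35388;

		report(implemented, enabled, WORKAROUND_BUG17230, "BUG17230");
		report(implemented, enabled, WORKAROUND_BUG35388, "BUG35388");
		report(implemented, enabled, WORKAROUND_BUG35017, "BUG35017");
		return 0;
	}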
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 56465f7465a2..1838afe2da92 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -58,6 +58,8 @@ enum efx_mcdi_mode {
* enabled
* @async_list: Queue of asynchronous requests
* @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
@@ -74,6 +76,10 @@ struct efx_mcdi_iface {
spinlock_t async_lock;
struct list_head async_list;
struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *logging_buffer;
+ bool logging_enabled;
+#endif
};
struct efx_mcdi_mon {
@@ -176,10 +182,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
* memory strictly 32 bits at a time, so add any necessary padding.
*/
-#define MCDI_DECLARE_BUF(_name, _len) \
+#define _MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
-#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
- MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
+#define MCDI_DECLARE_BUF(_name, _len) \
+ _MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name) \
+ MCDI_DECLARE_BUF(_name, 8)
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -339,6 +347,8 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
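The mcdi.h hunk above replaces MCDI_DECLARE_BUF_OUT_OR_ERR with a zero-initialising MCDI_DECLARE_BUF and a fixed 8-byte MCDI_DECLARE_BUF_ERR. A stand-alone sketch of the same macro shape outside the driver, showing the dword rounding and the zero initialiser:

	#include <stdint.h>
	#include <stdio.h>

	typedef struct { uint32_t u32[1]; } dword_t;

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	/* buffer of dwords, rounded up from a byte length, zeroed at declaration */
	#define DECLARE_BUF(name, len)	dword_t name[DIV_ROUND_UP(len, 4)] = {{{0}}}
	#define DECLARE_BUF_ERR(name)	DECLARE_BUF(name, 8)

	int main(void)
	{
		DECLARE_BUF(outbuf, 10);	/* 10 bytes -> 3 dwords, zeroed */
		DECLARE_BUF_ERR(errbuf);	/* always 2 dwords */

		printf("outbuf=%zu bytes, errbuf=%zu bytes\n",
		       sizeof(outbuf), sizeof(errbuf));
		return 0;
	}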
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e028de10e1b7..45fca9fc66b7 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -638,6 +638,8 @@
*/
#define MC_CMD_READ32 0x1
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
@@ -659,6 +661,8 @@
*/
#define MC_CMD_WRITE32 0x2
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
@@ -679,6 +683,8 @@
*/
#define MC_CMD_COPYCODE 0x3
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
@@ -717,6 +723,8 @@
*/
#define MC_CMD_SET_FUNC 0x4
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
@@ -732,6 +740,8 @@
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -758,6 +768,8 @@
*/
#define MC_CMD_GET_ASSERTS 0x6
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
@@ -794,6 +806,8 @@
*/
#define MC_CMD_LOG_CTRL 0x7
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
@@ -814,6 +828,8 @@
*/
#define MC_CMD_GET_VERSION 0x8
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
@@ -870,6 +886,8 @@
*/
#define MC_CMD_PTP 0xb
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
/* PTP operation code */
@@ -1404,6 +1422,8 @@
*/
#define MC_CMD_CSR_READ32 0xc
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
@@ -1428,6 +1448,8 @@
*/
#define MC_CMD_CSR_WRITE32 0xd
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
@@ -1452,6 +1474,8 @@
*/
#define MC_CMD_HP 0x54
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_HP_IN msgrequest */
#define MC_CMD_HP_IN_LEN 16
/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
@@ -1493,6 +1517,8 @@
*/
#define MC_CMD_STACKINFO 0xf
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_STACKINFO_IN msgrequest */
#define MC_CMD_STACKINFO_IN_LEN 0
@@ -1513,6 +1539,8 @@
*/
#define MC_CMD_MDIO_READ 0x10
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1552,6 +1580,8 @@
*/
#define MC_CMD_MDIO_WRITE 0x11
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1591,6 +1621,8 @@
*/
#define MC_CMD_DBI_WRITE 0x12
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
@@ -1739,6 +1771,8 @@
*/
#define MC_CMD_GET_BOARD_CFG 0x18
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
@@ -1778,6 +1812,8 @@
*/
#define MC_CMD_DBI_READX 0x19
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
@@ -1822,6 +1858,8 @@
*/
#define MC_CMD_SET_RAND_SEED 0x1a
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
/* Seed value. */
@@ -1863,6 +1901,8 @@
*/
#define MC_CMD_DRV_ATTACH 0x1c
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRV_ATTACH_IN msgrequest */
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state (0=detached, 1=attached) to set if UPDATE=1 */
@@ -1875,6 +1915,8 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
@@ -1920,6 +1962,8 @@
*/
#define MC_CMD_PORT_RESET 0x20
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PORT_RESET_IN msgrequest */
#define MC_CMD_PORT_RESET_IN_LEN 0
@@ -1934,6 +1978,7 @@
* extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
@@ -2023,6 +2068,8 @@
*/
#define MC_CMD_PUTS 0x23
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
@@ -2050,6 +2097,8 @@
*/
#define MC_CMD_GET_PHY_CFG 0x24
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
@@ -2149,6 +2198,8 @@
*/
#define MC_CMD_START_BIST 0x25
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
@@ -2185,6 +2236,8 @@
*/
#define MC_CMD_POLL_BIST 0x26
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -2344,6 +2397,8 @@
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
@@ -2463,6 +2518,8 @@
*/
#define MC_CMD_GET_LINK 0x29
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
@@ -2519,6 +2576,8 @@
*/
#define MC_CMD_SET_LINK 0x2a
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
/* ??? */
@@ -2550,6 +2609,8 @@
*/
#define MC_CMD_SET_ID_LED 0x2b
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
@@ -2568,6 +2629,8 @@
*/
#define MC_CMD_SET_MAC 0x2c
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
@@ -2609,6 +2672,8 @@
*/
#define MC_CMD_PHY_STATS 0x2d
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
/* ??? */
@@ -2687,8 +2752,10 @@
*/
#define MC_CMD_MAC_STATS 0x2e
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MAC_STATS_IN msgrequest */
-#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_LEN 20
/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
@@ -2710,6 +2777,8 @@
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -2824,11 +2893,31 @@
/* enum: RXDP counter: Number of times an emergency descriptor fetch was
* performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
* descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
/* enum: Start of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
@@ -2926,6 +3015,8 @@
*/
#define MC_CMD_WOL_FILTER_SET 0x32
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
@@ -3020,6 +3111,8 @@
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
@@ -3035,6 +3128,8 @@
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
@@ -3069,6 +3164,8 @@
*/
#define MC_CMD_NVRAM_TYPES 0x36
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TYPES_IN msgrequest */
#define MC_CMD_NVRAM_TYPES_IN_LEN 0
@@ -3125,6 +3222,8 @@
*/
#define MC_CMD_NVRAM_INFO 0x37
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
@@ -3157,6 +3256,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
@@ -3175,6 +3276,8 @@
*/
#define MC_CMD_NVRAM_READ 0x39
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
@@ -3202,6 +3305,8 @@
*/
#define MC_CMD_NVRAM_WRITE 0x3a
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_WRITE_IN msgrequest */
#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
@@ -3228,6 +3333,8 @@
*/
#define MC_CMD_NVRAM_ERASE 0x3b
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
@@ -3248,6 +3355,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
@@ -3279,6 +3388,8 @@
*/
#define MC_CMD_REBOOT 0x3d
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
@@ -3316,6 +3427,8 @@
*/
#define MC_CMD_REBOOT_MODE 0x3f
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
@@ -3368,6 +3481,8 @@
*/
#define MC_CMD_SENSOR_INFO 0x41
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -3542,6 +3657,8 @@
*/
#define MC_CMD_READ_SENSORS 0x42
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
@@ -3602,6 +3719,8 @@
*/
#define MC_CMD_GET_PHY_STATE 0x43
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
@@ -3636,6 +3755,8 @@
*/
#define MC_CMD_WOL_FILTER_GET 0x45
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
@@ -3651,6 +3772,8 @@
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
@@ -3692,6 +3815,8 @@
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
@@ -3722,6 +3847,8 @@
*/
#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_TESTASSERT_IN msgrequest */
#define MC_CMD_TESTASSERT_IN_LEN 0
@@ -3739,6 +3866,8 @@
*/
#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
@@ -3765,6 +3894,8 @@
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
@@ -3788,6 +3919,8 @@
*/
#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
@@ -3849,6 +3982,8 @@
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
@@ -3890,6 +4025,8 @@
*/
#define MC_CMD_NVRAM_PARTITIONS 0x51
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
@@ -3913,6 +4050,8 @@
*/
#define MC_CMD_NVRAM_METADATA 0x52
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_METADATA_IN msgrequest */
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
@@ -3958,6 +4097,8 @@
*/
#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
@@ -4087,11 +4228,66 @@
/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
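The FUNCTION field above carries the PF in bits 0-15 and the VF in bits 16-31, so PF 1 / VF 3 encodes as 0x00030001 as the comment notes. A stand-alone sketch of that packing; the helper name is illustrative:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t link_state_mode_function(unsigned pf, unsigned vf)
	{
		/* PF in the low 16 bits, VF in the high 16 bits */
		return ((uint32_t)(vf & 0xffff) << 16) | (pf & 0xffff);
	}

	int main(void)
	{
		printf("0x%08x\n", link_state_mode_function(1, 3)); /* 0x00030001 */
		return 0;
	}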
+/***********************************/
/* MC_CMD_READ_REGS
* Get a dump of the MCPU registers
*/
#define MC_CMD_READ_REGS 0x50
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -4115,6 +4311,8 @@
*/
#define MC_CMD_INIT_EVQ 0x80
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_EVQ_IN msgrequest */
#define MC_CMD_INIT_EVQ_IN_LENMIN 44
#define MC_CMD_INIT_EVQ_IN_LENMAX 548
@@ -4213,6 +4411,8 @@
*/
#define MC_CMD_INIT_RXQ 0x81
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_RXQ_IN msgrequest */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
@@ -4265,6 +4465,8 @@
*/
#define MC_CMD_INIT_TXQ 0x82
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_TXQ_IN msgrequest */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
@@ -4322,6 +4524,8 @@
*/
#define MC_CMD_FINI_EVQ 0x83
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_EVQ_IN msgrequest */
#define MC_CMD_FINI_EVQ_IN_LEN 4
/* Instance of EVQ to destroy. Should be the same instance as that previously
@@ -4339,6 +4543,8 @@
*/
#define MC_CMD_FINI_RXQ 0x84
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_RXQ_IN msgrequest */
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
@@ -4354,6 +4560,8 @@
*/
#define MC_CMD_FINI_TXQ 0x85
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_TXQ_IN msgrequest */
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
@@ -4369,6 +4577,8 @@
*/
#define MC_CMD_DRIVER_EVENT 0x86
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRIVER_EVENT_IN msgrequest */
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
@@ -4392,6 +4602,8 @@
*/
#define MC_CMD_PROXY_CMD 0x5b
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PROXY_CMD_IN msgrequest */
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
@@ -4414,6 +4626,8 @@
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
@@ -4437,6 +4651,8 @@
*/
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
@@ -4463,6 +4679,8 @@
*/
#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
@@ -4477,6 +4695,8 @@
*/
#define MC_CMD_FILTER_OP 0x8a
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FILTER_OP_IN msgrequest */
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
@@ -4637,6 +4857,8 @@
*/
#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
@@ -4669,6 +4891,8 @@
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
@@ -4719,6 +4943,8 @@
*/
#define MC_CMD_GET_PF_COUNT 0xb6
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PF_COUNT_IN msgrequest */
#define MC_CMD_GET_PF_COUNT_IN_LEN 0
@@ -4750,6 +4976,8 @@
*/
#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
@@ -4765,6 +4993,8 @@
*/
#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
@@ -4780,6 +5010,8 @@
*/
#define MC_CMD_ALLOC_VIS 0x8b
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_ALLOC_VIS_IN msgrequest */
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
@@ -4804,6 +5036,8 @@
*/
#define MC_CMD_FREE_VIS 0x8c
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FREE_VIS_IN msgrequest */
#define MC_CMD_FREE_VIS_IN_LEN 0
@@ -4817,6 +5051,8 @@
*/
#define MC_CMD_GET_SRIOV_CFG 0xba
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
@@ -4841,6 +5077,8 @@
*/
#define MC_CMD_SET_SRIOV_CFG 0xbb
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
@@ -4870,6 +5108,8 @@
*/
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
@@ -4889,6 +5129,8 @@
*/
#define MC_CMD_DUMP_VI_STATE 0x8e
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
@@ -4998,6 +5240,8 @@
*/
#define MC_CMD_ALLOC_PIOBUF 0x8f
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
@@ -5013,6 +5257,8 @@
*/
#define MC_CMD_FREE_PIOBUF 0x90
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_PIOBUF_IN msgrequest */
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
@@ -5028,6 +5274,8 @@
*/
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
@@ -5062,6 +5310,8 @@
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
@@ -5096,6 +5346,8 @@
*/
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5157,6 +5409,8 @@
*/
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5203,6 +5457,8 @@
*/
#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
* are subtle, and so downloads must proceed in a number of phases.
*
@@ -5318,6 +5574,7 @@
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
@@ -5343,6 +5600,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5433,6 +5692,8 @@
*/
#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
@@ -5448,6 +5709,8 @@
*/
#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
@@ -5463,6 +5726,8 @@
*/
#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
@@ -5480,6 +5745,8 @@
*/
#define MC_CMD_TCM_TXQ_INIT 0xb5
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
@@ -5511,6 +5778,8 @@
*/
#define MC_CMD_LINK_PIOBUF 0x92
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_LINK_PIOBUF_IN msgrequest */
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
@@ -5528,6 +5797,8 @@
*/
#define MC_CMD_UNLINK_PIOBUF 0x93
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
@@ -5543,6 +5814,8 @@
*/
#define MC_CMD_VSWITCH_ALLOC 0x94
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
@@ -5572,6 +5845,8 @@
*/
#define MC_CMD_VSWITCH_FREE 0x95
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_FREE_IN msgrequest */
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
@@ -5587,6 +5862,8 @@
*/
#define MC_CMD_VPORT_ALLOC 0x96
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ALLOC_IN msgrequest */
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
@@ -5636,6 +5913,8 @@
*/
#define MC_CMD_VPORT_FREE 0x97
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_FREE_IN msgrequest */
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
@@ -5651,8 +5930,10 @@
*/
#define MC_CMD_VADAPTOR_ALLOC 0x98
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
-#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Flags controlling v-adaptor creation */
@@ -5661,6 +5942,19 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
@@ -5672,6 +5966,8 @@
*/
#define MC_CMD_VADAPTOR_FREE 0x99
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
@@ -5682,11 +5978,53 @@
/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
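MC_CMD_VADAPTOR_SET_MAC_IN above is a 4-byte upstream port id at offset 0 followed by a 6-byte MAC at offset 4, giving the 10-byte request length. A stand-alone sketch of laying out such a buffer, assuming MCDI's little-endian dword packing; the helper name, port id and MAC are made up:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SET_MAC_IN_LEN			 10
	#define SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
	#define SET_MAC_IN_MACADDR_OFST		 4

	static void build_set_mac(uint8_t *buf, uint32_t port_id, const uint8_t *mac)
	{
		/* little-endian port id dword */
		buf[SET_MAC_IN_UPSTREAM_PORT_ID_OFST + 0] = port_id & 0xff;
		buf[SET_MAC_IN_UPSTREAM_PORT_ID_OFST + 1] = (port_id >> 8) & 0xff;
		buf[SET_MAC_IN_UPSTREAM_PORT_ID_OFST + 2] = (port_id >> 16) & 0xff;
		buf[SET_MAC_IN_UPSTREAM_PORT_ID_OFST + 3] = (port_id >> 24) & 0xff;
		/* 6-byte MAC immediately after */
		memcpy(buf + SET_MAC_IN_MACADDR_OFST, mac, 6);
	}

	int main(void)
	{
		uint8_t buf[SET_MAC_IN_LEN] = { 0 };
		const uint8_t mac[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
		int i;

		build_set_mac(buf, 0x01000001, mac);	/* made-up port id */
		for (i = 0; i < SET_MAC_IN_LEN; i++)
			printf("%02x ", buf[i]);
		printf("\n");
		return 0;
	}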
+/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
* assign a port to a PCI function.
*/
#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
@@ -5708,6 +6046,8 @@
*/
#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
@@ -5736,6 +6076,8 @@
*/
#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
@@ -5753,6 +6095,8 @@
*/
#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
@@ -5768,6 +6112,8 @@
*/
#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
@@ -5800,6 +6146,8 @@
*/
#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
@@ -5815,6 +6163,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
@@ -5833,6 +6183,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
@@ -5851,6 +6203,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
@@ -5869,6 +6223,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
@@ -5887,6 +6243,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
@@ -5912,6 +6270,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
@@ -5937,6 +6297,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
@@ -5959,6 +6321,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -5974,6 +6338,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
@@ -5994,6 +6360,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -6014,6 +6382,8 @@
*/
#define MC_CMD_GET_VECTOR_CFG 0xbf
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
@@ -6033,6 +6403,8 @@
*/
#define MC_CMD_SET_VECTOR_CFG 0xc0
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
@@ -6423,6 +6795,8 @@
*/
#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6441,6 +6815,8 @@
*/
#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6459,6 +6835,8 @@
*/
#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
@@ -6486,6 +6864,8 @@
*/
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
@@ -6510,6 +6890,8 @@
*/
#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
@@ -6526,6 +6908,8 @@
*/
#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
@@ -6890,6 +7274,8 @@
*/
#define MC_CMD_GET_CLOCK 0xac
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_CLOCK_IN msgrequest */
#define MC_CMD_GET_CLOCK_IN_LEN 0
@@ -6907,6 +7293,8 @@
*/
#define MC_CMD_SET_CLOCK 0xad
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 12
/* Requested system frequency in MHz; 0 leaves unchanged. */
@@ -6932,6 +7320,8 @@
*/
#define MC_CMD_DPCPU_RPC 0xae
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
@@ -7016,6 +7406,8 @@
*/
#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
@@ -7031,6 +7423,8 @@
*/
#define MC_CMD_CAP_BLK_READ 0xe7
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
@@ -7055,6 +7449,8 @@
*/
#define MC_CMD_DUMP_DO 0xe8
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
@@ -7108,6 +7504,8 @@
*/
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
@@ -7151,6 +7549,8 @@
*/
#define MC_CMD_SET_PSU 0xea
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
@@ -7171,6 +7571,8 @@
*/
#define MC_CMD_GET_FUNCTION_INFO 0xec
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
@@ -7188,6 +7590,8 @@
*/
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -7203,6 +7607,8 @@
*/
#define MC_CMD_UART_SEND_DATA 0xee
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
@@ -7231,6 +7637,8 @@
*/
#define MC_CMD_UART_RECV_DATA 0xef
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
@@ -7266,6 +7674,8 @@
*/
#define MC_CMD_READ_FUSES 0xf0
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
@@ -7292,6 +7702,8 @@
*/
#define MC_CMD_KR_TUNE 0xf1
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
#define MC_CMD_KR_TUNE_IN_LENMAX 252
@@ -7550,6 +7962,8 @@
*/
#define MC_CMD_PCIE_TUNE 0xf2
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PCIE_TUNE_IN msgrequest */
#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
@@ -7711,6 +8125,8 @@
*/
#define MC_CMD_LICENSING 0xf3
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSING_IN msgrequest */
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
@@ -7756,6 +8172,8 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MC2MC_PROXY_IN msgrequest */
#define MC_CMD_MC2MC_PROXY_IN_LEN 0
@@ -7771,6 +8189,8 @@
*/
#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
@@ -7792,6 +8212,8 @@
*/
#define MC_CMD_LICENSED_APP_OP 0xf6
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
@@ -7847,6 +8269,8 @@
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
@@ -7881,6 +8305,8 @@
*/
#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
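The MC_CMD_0xNN_PRIVILEGE_CTG defines added above tag each MCDI command with a privilege category; the names suggest SRIOV_CTG_GENERAL commands may be issued by any function, while SRIOV_CTG_ADMIN commands are restricted to a privileged (admin) function. A minimal sketch of how such a tag could gate a request is shown below; the enum values mirror the naming convention, but the check_privilege() helper is purely illustrative and does not exist in the driver or firmware.

#include <stdbool.h>

/* Illustrative only: mirrors the SRIOV_CTG_* tokens used by the
 * MC_CMD_0x.._PRIVILEGE_CTG defines above.
 */
enum sriov_ctg { SRIOV_CTG_GENERAL, SRIOV_CTG_ADMIN };

static bool check_privilege(bool is_admin_fn, enum sriov_ctg ctg)
{
        /* GENERAL commands are open to every function; ADMIN commands
         * require a privileged function.
         */
        return ctg == SRIOV_CTG_GENERAL || is_admin_fn;
}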
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb19b70eac01..7f295c4d7b80 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -865,6 +865,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+ /* This has no effect on EF10 */
ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
efx->net_dev->dev_addr);
@@ -923,6 +924,7 @@ enum efx_stats_action {
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -944,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+ NULL, 0, rc);
return rc;
}
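The hunk above moves MC_CMD_MAC_STATS onto the quiet RPC path so that an expected -ENOENT (raised while the DMA queues have not yet been set up) is not logged as an error, reports any other failure explicitly, and directs the request at the function's own vport via MAC_STATS_IN_PORT_ID. The same pattern in isolation, written as a hypothetical helper (not driver API) around the efx_mcdi_rpc_quiet() and efx_mcdi_display_error() calls visible in the hunk:

/* Sketch only: issue an MCDI request quietly, tolerate one benign errno,
 * and log anything else.
 */
static int efx_mcdi_rpc_tolerant(struct efx_nic *efx, unsigned int cmd,
                                 const efx_dword_t *inbuf, size_t inlen,
                                 int benign_errno)
{
        int rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, inlen, NULL, 0, NULL);

        if (rc && rc != benign_errno)
                efx_mcdi_display_error(efx, cmd, inlen, NULL, 0, rc);
        return rc;
}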
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 325dd94bca46..d72f522bf9c3 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -25,6 +25,7 @@
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
@@ -793,7 +794,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_vf;
struct vfdi_status;
/**
@@ -897,7 +897,8 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @filter_lock: Filter table lock
+ * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
@@ -909,7 +910,6 @@ struct vfdi_status;
* completed (either success or failure). Not used when MCDI is used to
* flush receive queues.
* @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
- * @vf: Array of &struct efx_vf objects.
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
@@ -1040,6 +1040,7 @@ struct efx_nic {
void *loopback_selftest;
+ struct rw_semaphore filter_sem;
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
@@ -1053,7 +1054,6 @@ struct efx_nic {
wait_queue_head_t flush_wq;
#ifdef CONFIG_SFC_SRIOV
- struct efx_vf *vf;
unsigned vf_count;
unsigned vf_init_count;
unsigned vi_scale;
@@ -1092,6 +1092,7 @@ struct efx_mtd_partition {
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_bar: Get the memory BAR
* @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
@@ -1204,6 +1205,7 @@ struct efx_mtd_partition {
* @ptp_set_ts_config: Set hardware timestamp configuration. The flags
* and tx_type will already have been validated but this operation
* must validate and update rx_filter.
+ * @set_mac_address: Set the MAC address of the device
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1226,6 +1228,8 @@ struct efx_mtd_partition {
* @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
+ bool is_vf;
+ unsigned int mem_bar;
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
@@ -1277,7 +1281,8 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_rss_config)(struct efx_nic *efx);
+ int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1330,11 +1335,28 @@ struct efx_nic_type {
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
+ int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
- void (*sriov_mac_address_changed)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
+ void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+ int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos);
+ int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+ bool spoofchk);
+ int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivi);
+ int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+ int link_state);
+ int (*sriov_get_phys_port_id)(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+ int (*vswitching_probe)(struct efx_nic *efx);
+ int (*vswitching_restore)(struct efx_nic *efx);
+ void (*vswitching_remove)(struct efx_nic *efx);
+ int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+ int (*set_mac_address)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
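rx_push_rss_config() now takes an explicit indirection table plus a flag saying whether the table was supplied by the user, and returns a status code. A caller pushing a user-provided table would look roughly like the sketch below (illustrative call site, not lifted from this patch).

/* Illustrative call site for the new method signature shown above. */
static int efx_push_user_rss_table(struct efx_nic *efx, const u32 *indir)
{
        return efx->type->rx_push_rss_config(efx, true, indir);
}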
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 93d10cbbd1cf..31ff9084d9a4 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -381,6 +381,7 @@ enum {
* @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
* @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
* @vfdi_status: Common VFDI status page to be dmad to VF address space.
* @local_addr_list: List of local addresses. Protected by %local_lock.
@@ -394,6 +395,7 @@ struct siena_nic_data {
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
+ struct siena_vf *vf;
struct efx_channel *vfdi_channel;
unsigned vf_buftbl_base;
struct efx_buffer vfdi_status;
@@ -405,59 +407,77 @@ struct siena_nic_data {
};
enum {
- EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
- EF10_STAT_tx_packets,
- EF10_STAT_tx_pause,
- EF10_STAT_tx_control,
- EF10_STAT_tx_unicast,
- EF10_STAT_tx_multicast,
- EF10_STAT_tx_broadcast,
- EF10_STAT_tx_lt64,
- EF10_STAT_tx_64,
- EF10_STAT_tx_65_to_127,
- EF10_STAT_tx_128_to_255,
- EF10_STAT_tx_256_to_511,
- EF10_STAT_tx_512_to_1023,
- EF10_STAT_tx_1024_to_15xx,
- EF10_STAT_tx_15xx_to_jumbo,
- EF10_STAT_rx_bytes,
- EF10_STAT_rx_bytes_minus_good_bytes,
- EF10_STAT_rx_good_bytes,
- EF10_STAT_rx_bad_bytes,
- EF10_STAT_rx_packets,
- EF10_STAT_rx_good,
- EF10_STAT_rx_bad,
- EF10_STAT_rx_pause,
- EF10_STAT_rx_control,
+ EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+ EF10_STAT_port_tx_packets,
+ EF10_STAT_port_tx_pause,
+ EF10_STAT_port_tx_control,
+ EF10_STAT_port_tx_unicast,
+ EF10_STAT_port_tx_multicast,
+ EF10_STAT_port_tx_broadcast,
+ EF10_STAT_port_tx_lt64,
+ EF10_STAT_port_tx_64,
+ EF10_STAT_port_tx_65_to_127,
+ EF10_STAT_port_tx_128_to_255,
+ EF10_STAT_port_tx_256_to_511,
+ EF10_STAT_port_tx_512_to_1023,
+ EF10_STAT_port_tx_1024_to_15xx,
+ EF10_STAT_port_tx_15xx_to_jumbo,
+ EF10_STAT_port_rx_bytes,
+ EF10_STAT_port_rx_bytes_minus_good_bytes,
+ EF10_STAT_port_rx_good_bytes,
+ EF10_STAT_port_rx_bad_bytes,
+ EF10_STAT_port_rx_packets,
+ EF10_STAT_port_rx_good,
+ EF10_STAT_port_rx_bad,
+ EF10_STAT_port_rx_pause,
+ EF10_STAT_port_rx_control,
+ EF10_STAT_port_rx_unicast,
+ EF10_STAT_port_rx_multicast,
+ EF10_STAT_port_rx_broadcast,
+ EF10_STAT_port_rx_lt64,
+ EF10_STAT_port_rx_64,
+ EF10_STAT_port_rx_65_to_127,
+ EF10_STAT_port_rx_128_to_255,
+ EF10_STAT_port_rx_256_to_511,
+ EF10_STAT_port_rx_512_to_1023,
+ EF10_STAT_port_rx_1024_to_15xx,
+ EF10_STAT_port_rx_15xx_to_jumbo,
+ EF10_STAT_port_rx_gtjumbo,
+ EF10_STAT_port_rx_bad_gtjumbo,
+ EF10_STAT_port_rx_overflow,
+ EF10_STAT_port_rx_align_error,
+ EF10_STAT_port_rx_length_error,
+ EF10_STAT_port_rx_nodesc_drops,
+ EF10_STAT_port_rx_pm_trunc_bb_overflow,
+ EF10_STAT_port_rx_pm_discard_bb_overflow,
+ EF10_STAT_port_rx_pm_trunc_vfifo_full,
+ EF10_STAT_port_rx_pm_discard_vfifo_full,
+ EF10_STAT_port_rx_pm_trunc_qbb,
+ EF10_STAT_port_rx_pm_discard_qbb,
+ EF10_STAT_port_rx_pm_discard_mapping,
+ EF10_STAT_port_rx_dp_q_disabled_packets,
+ EF10_STAT_port_rx_dp_di_dropped_packets,
+ EF10_STAT_port_rx_dp_streaming_packets,
+ EF10_STAT_port_rx_dp_hlb_fetch,
+ EF10_STAT_port_rx_dp_hlb_wait,
EF10_STAT_rx_unicast,
+ EF10_STAT_rx_unicast_bytes,
EF10_STAT_rx_multicast,
+ EF10_STAT_rx_multicast_bytes,
EF10_STAT_rx_broadcast,
- EF10_STAT_rx_lt64,
- EF10_STAT_rx_64,
- EF10_STAT_rx_65_to_127,
- EF10_STAT_rx_128_to_255,
- EF10_STAT_rx_256_to_511,
- EF10_STAT_rx_512_to_1023,
- EF10_STAT_rx_1024_to_15xx,
- EF10_STAT_rx_15xx_to_jumbo,
- EF10_STAT_rx_gtjumbo,
- EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_broadcast_bytes,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_bad_bytes,
EF10_STAT_rx_overflow,
- EF10_STAT_rx_align_error,
- EF10_STAT_rx_length_error,
- EF10_STAT_rx_nodesc_drops,
- EF10_STAT_rx_pm_trunc_bb_overflow,
- EF10_STAT_rx_pm_discard_bb_overflow,
- EF10_STAT_rx_pm_trunc_vfifo_full,
- EF10_STAT_rx_pm_discard_vfifo_full,
- EF10_STAT_rx_pm_trunc_qbb,
- EF10_STAT_rx_pm_discard_qbb,
- EF10_STAT_rx_pm_discard_mapping,
- EF10_STAT_rx_dp_q_disabled_packets,
- EF10_STAT_rx_dp_di_dropped_packets,
- EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_hlb_fetch,
- EF10_STAT_rx_dp_hlb_wait,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_unicast_bytes,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_multicast_bytes,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_broadcast_bytes,
+ EF10_STAT_tx_bad,
+ EF10_STAT_tx_bad_bytes,
+ EF10_STAT_tx_overflow,
EF10_STAT_COUNT
};
@@ -483,12 +503,21 @@ enum {
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
+ * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
+ * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
+ * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
+ * @pf_index: The number for this PF, or the parent PF if this is a VF
+#ifdef CONFIG_SFC_SRIOV
+ * @vf: Pointer to VF data structure
+#endif
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -503,126 +532,27 @@ struct efx_ef10_nic_data {
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
bool must_restore_piobufs;
u32 rx_rss_context;
+ bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool must_check_datapath_caps;
u32 datapath_caps;
-};
-
-/*
- * On the SFC9000 family each port is associated with 1 PCI physical
- * function (PF) handled by sfc and a configurable number of virtual
- * functions (VFs) that may be handled by some other driver, often in
- * a VM guest. The queue pointer registers are mapped in both PF and
- * VF BARs such that an 8K region provides access to a single RX, TX
- * and event queue (collectively a Virtual Interface, VI or VNIC).
- *
- * The PF has access to all 1024 VIs while VFs are mapped to VIs
- * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
- * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
- * The number of VIs and the VI_SCALE value are configurable but must
- * be established at boot time by firmware.
- */
-
-/* Maximum VI_SCALE parameter supported by Siena */
-#define EFX_VI_SCALE_MAX 6
-/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
- * so this is the smallest allowed value. */
-#define EFX_VI_BASE 128U
-/* Maximum number of VFs allowed */
-#define EFX_VF_COUNT_MAX 127
-/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
-#define EFX_MAX_VF_EVQ_SIZE 8192UL
-/* The number of buffer table entries reserved for each VI on a VF */
-#define EFX_VF_BUFTBL_PER_VI \
- ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
- sizeof(efx_qword_t) / EFX_BUF_SIZE)
-
+ unsigned int rx_dpcpu_fw_id;
+ unsigned int tx_dpcpu_fw_id;
+ unsigned int vport_id;
+ bool must_probe_vswitching;
+ unsigned int pf_index;
+ u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
-{
- return efx->vf_count != 0;
-}
-
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
-{
- return efx->vf_init_count != 0;
-}
-
-static inline unsigned int efx_vf_size(struct efx_nic *efx)
-{
- return 1 << efx->vi_scale;
-}
+ unsigned int vf_index;
+ struct ef10_vf *vf;
+#endif
+ u8 vport_mac[ETH_ALEN];
+};
int efx_init_sriov(void);
-void efx_siena_sriov_probe(struct efx_nic *efx);
-int efx_siena_sriov_init(struct efx_nic *efx);
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_siena_sriov_reset(struct efx_nic *efx);
-void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#else
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
-static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_event(struct efx_channel *channel,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
- unsigned dmaq) {}
-static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
-static inline void efx_fini_sriov(void) {}
-
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#endif
-
-/* FALCON */
-static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
-
-int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos);
-int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
-
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
@@ -654,6 +584,7 @@ extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
/**************************************************************************
*
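With the Siena VF array now owned by struct siena_nic_data rather than struct efx_nic, lookups go through efx->nic_data; a bounds-checked helper in that style might look like the sketch below (illustrative only, not part of the patch).

/* Sketch: index into the per-NIC VF array, which now lives in
 * siena_nic_data rather than struct efx_nic.
 */
static struct siena_vf *siena_vf_by_index(struct efx_nic *efx, unsigned int i)
{
        struct siena_nic_data *nic_data = efx->nic_data;

        return i < efx->vf_count ? nic_data->vf + i : NULL;
}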
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a2e9aee05cdd..ad62615a93dc 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -306,7 +306,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+ _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
unsigned int good_syncs;
unsigned int fast_syncs;
@@ -389,11 +389,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ if (rc)
memset(outbuf, 0, sizeof(outbuf));
- }
efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
efx_ptp_stat_mask,
stats, _MCDI_PTR(outbuf, 0), false);
@@ -490,14 +487,20 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
*/
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &out_len);
- if (rc == 0)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0) {
fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
- else if (rc == -EINVAL)
+ } else if (rc == -EINVAL) {
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
- else
+ } else if (rc == -EPERM) {
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ return rc;
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+ outbuf, sizeof(outbuf), rc);
return rc;
+ }
if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
@@ -541,8 +544,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
if (rc == 0) {
efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
@@ -558,6 +561,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
efx->ptp_data->ts_corrections.pps_out = 0;
efx->ptp_data->ts_corrections.pps_in = 0;
} else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
+ sizeof(outbuf), rc);
return rc;
}
@@ -568,7 +573,7 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
@@ -596,7 +601,7 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
@@ -604,7 +609,12 @@ static int efx_ptp_disable(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
rc = (rc == -EALREADY) ? 0 : rc;
- if (rc)
+	/* If we get ENOSYS or EPERM, the NIC doesn't support PTP, and thus
+	 * this function should only have been called during probe.
+	 */
+ if (rc == -ENOSYS || rc == -EPERM)
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ else if (rc)
efx_mcdi_display_error(efx, MC_CMD_PTP,
MC_CMD_PTP_IN_DISABLE_LEN,
outbuf, sizeof(outbuf), rc);
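The PTP changes above switch the probe-time MCDI calls to the quiet variants and treat -EPERM (and -ENOSYS on the disable path) as "this function has no PTP support", reported at info level, rather than as a failure. The classification reduces to a predicate like the following kernel-style sketch (hypothetical helper, shown for clarity):

#include <linux/errno.h>

/* Sketch: return codes that mean "no PTP on this function" and should be
 * reported as informational during probe instead of as errors.
 */
static bool efx_ptp_rc_means_no_ptp(int rc)
{
        return rc == -EPERM || rc == -ENOSYS;
}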
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index f12c811938d2..b323b9167526 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,6 +25,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "selftest.h"
+#include "siena_sriov.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
@@ -306,7 +307,9 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail5;
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_probe(efx);
+#endif
efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -321,7 +324,8 @@ fail1:
return rc;
}
-static void siena_rx_push_rss_config(struct efx_nic *efx)
+static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
@@ -343,7 +347,11 @@ static void siena_rx_push_rss_config(struct efx_nic *efx)
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+
+ return 0;
}
/* This call performs hardware-specific global initialisation, such as
@@ -386,7 +394,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- siena_rx_push_rss_config(efx);
+ siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -909,6 +917,8 @@ fail:
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
@@ -996,11 +1006,22 @@ const struct efx_nic_type siena_a0_nic_type = {
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_siena_sriov_configure,
.sriov_init = efx_siena_sriov_init,
.sriov_fini = efx_siena_sriov_fini,
- .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
.sriov_wanted = efx_siena_sriov_wanted,
.sriov_reset = efx_siena_sriov_reset,
+ .sriov_flr = efx_siena_sriov_flr,
+ .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_siena_sriov_get_vf_config,
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+ .set_mac_address = efx_siena_sriov_mac_address_changed,
+#endif
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
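siena_a0_nic_type now records that it drives a PF (is_vf = false), names its register BAR, and, when CONFIG_SFC_SRIOV is set, fills in the new per-VF operations; the vswitching hooks are dummy ops, presumably because vswitching is an EF10-only feature. The optional set_mac_address hook would typically be reached through a NULL-checked wrapper, roughly as sketched below (the wrapper name is illustrative).

/* Sketch of a guarded call through the optional hook declared in
 * struct efx_nic_type above.
 */
static int efx_push_mac_address(struct efx_nic *efx)
{
        if (efx->type->set_mac_address)
                return efx->type->set_mac_address(efx);
        return 0;
}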
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index fe83430796fd..da7b94f34604 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -16,6 +16,7 @@
#include "filter.h"
#include "mcdi_pcol.h"
#include "farch_regs.h"
+#include "siena_sriov.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -38,7 +39,7 @@ enum efx_vf_tx_filter_mode {
};
/**
- * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
* @efx: The Efx NIC owning this VF
* @pci_rid: The PCI requester ID for this VF
* @pci_name: The PCI name (formatted address) of this VF
@@ -83,7 +84,7 @@ enum efx_vf_tx_filter_mode {
* @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
* @reset_work: Work item to schedule a VF reset.
*/
-struct efx_vf {
+struct siena_vf {
struct efx_nic *efx;
unsigned int pci_rid;
char pci_name[13]; /* dddd:bb:dd.f */
@@ -189,7 +190,7 @@ MODULE_PARM_DESC(max_vfs,
*/
static struct workqueue_struct *vfdi_workqueue;
-static unsigned abs_index(struct efx_vf *vf, unsigned index)
+static unsigned abs_index(struct siena_vf *vf, unsigned index)
{
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
@@ -207,8 +208,8 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
- rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
- outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+ outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SRIOV_OUT_LEN)
@@ -299,7 +300,7 @@ out:
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
-static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -343,7 +344,7 @@ static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
}
/* The RX filter is managed here on behalf of the VF driver */
-static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -382,7 +383,7 @@ static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
}
}
-static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -397,7 +398,7 @@ static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
* local_page_list, either by acquiring local_lock or by running from
* efx_siena_sriov_peer_work()
*/
-static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -509,8 +510,9 @@ static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
* Optionally set VF index and VI index within the VF.
*/
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
- struct efx_vf **vf_out, unsigned *rel_index_out)
+ struct siena_vf **vf_out, unsigned *rel_index_out)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned vf_i;
if (abs_index < EFX_VI_BASE)
@@ -520,13 +522,13 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
return true;
if (vf_out)
- *vf_out = efx->vf + vf_i;
+ *vf_out = nic_data->vf + vf_i;
if (rel_index_out)
*rel_index_out = abs_index % efx_vf_size(efx);
return false;
}
-static int efx_vfdi_init_evq(struct efx_vf *vf)
+static int efx_vfdi_init_evq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -567,7 +569,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_rxq(struct efx_vf *vf)
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -608,7 +610,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_txq(struct efx_vf *vf)
+static int efx_vfdi_init_txq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -655,7 +657,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
-static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
{
/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
smp_mb();
@@ -664,7 +666,7 @@ static bool efx_vfdi_flush_wake(struct efx_vf *vf)
atomic_read(&vf->rxq_retry_count);
}
-static void efx_vfdi_flush_clear(struct efx_vf *vf)
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
{
memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
vf->txq_count = 0;
@@ -674,7 +676,7 @@ static void efx_vfdi_flush_clear(struct efx_vf *vf)
atomic_set(&vf->rxq_retry_count, 0);
}
-static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
efx_oword_t reg;
@@ -757,7 +759,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
-static int efx_vfdi_insert_filter(struct efx_vf *vf)
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -789,7 +791,7 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -801,7 +803,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_set_status_page(struct efx_vf *vf)
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -846,7 +848,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
{
mutex_lock(&vf->status_lock);
vf->status_addr = 0;
@@ -855,7 +857,7 @@ static int efx_vfdi_clear_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
@@ -870,7 +872,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
static void efx_siena_sriov_vfdi(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
struct efx_memcpy_req copy[2];
@@ -936,7 +938,8 @@ static void efx_siena_sriov_vfdi(struct work_struct *work)
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
-static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
@@ -1006,7 +1009,7 @@ static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
@@ -1055,8 +1058,10 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
if (!max_vfs)
return;
- if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
+ netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
return;
+ }
if (count > 0 && count > max_vfs)
count = max_vfs;
@@ -1077,7 +1082,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_work);
struct efx_nic *efx = nic_data->efx;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
- struct efx_vf *vf;
+ struct siena_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
struct efx_endpoint_page *epp;
@@ -1099,7 +1104,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
peer_count = 1;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
@@ -1155,7 +1160,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
/* Finally, push the pages */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->status_addr)
@@ -1190,14 +1195,16 @@ static void efx_siena_sriov_free_local(struct efx_nic *efx)
static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
- struct efx_vf *vf;
+ struct siena_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
- efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
- if (!efx->vf)
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
return -ENOMEM;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
vf->efx = efx;
vf->index = index;
@@ -1216,11 +1223,12 @@ static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
unsigned int pos;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
efx_nic_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
@@ -1237,7 +1245,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
- struct efx_vf *vf;
+ struct siena_vf *vf;
int rc;
sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
@@ -1250,7 +1258,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
/* Reserve buffer entries */
vf->buftbl_base = buftbl_base;
@@ -1350,7 +1358,7 @@ fail_pci:
fail_vfs:
cancel_work_sync(&nic_data->peer_work);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
fail_alloc:
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
@@ -1361,7 +1369,7 @@ fail_cmd:
void efx_siena_sriov_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int pos;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -1377,7 +1385,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Flush all reconfiguration work */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
@@ -1388,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Tear down back-end state */
efx_siena_sriov_vfs_fini(efx);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
@@ -1396,7 +1404,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned qid, seq, type, data;
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
@@ -1452,11 +1460,12 @@ error:
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i > efx->vf_init_count)
return;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
netif_info(efx, hw, efx->net_dev,
"FLR on VF %s\n", vf->pci_name);
@@ -1467,21 +1476,23 @@ void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
vf->evq0_count = 0;
}
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
if (!efx->vf_init_count)
- return;
+ return 0;
ether_addr_copy(vfdi_status->peers[0].mac_addr,
efx->net_dev->dev_addr);
queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+ return 0;
}
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
@@ -1500,7 +1511,7 @@ void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned ev_failed, queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
@@ -1525,7 +1536,7 @@ void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
/* Called from napi. Schedule the reset work item */
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int rel;
if (map_vi_index(efx, dmaq, &vf, &rel))
@@ -1541,9 +1552,10 @@ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
/* Reset all VFs */
void efx_siena_sriov_reset(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned int vf_i;
struct efx_buffer buf;
- struct efx_vf *vf;
+ struct siena_vf *vf;
ASSERT_RTNL();
@@ -1557,7 +1569,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
efx_siena_sriov_reset_vf(vf, &buf);
}
@@ -1573,7 +1585,6 @@ int efx_init_sriov(void)
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
if (!vfdi_workqueue)
return -ENOMEM;
-
return 0;
}
@@ -1582,14 +1593,14 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
@@ -1599,16 +1610,16 @@ int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
return 0;
}
-int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
@@ -1619,16 +1630,16 @@ int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
return 0;
}
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
int rc;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->txq_lock);
if (vf->txq_count == 0) {
@@ -1643,16 +1654,16 @@ int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
return rc;
}
-int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
@@ -1666,3 +1677,12 @@ int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
return 0;
}
+bool efx_siena_sriov_wanted(struct efx_nic *efx)
+{
+ return efx->vf_count != 0;
+}
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ return 0;
+}
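efx_siena_sriov_set_vf_vlan() above packs the VLAN ID and priority into a TCI word with (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT). A standalone worked example of that packing, with constants mirroring <linux/if_vlan.h>:

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK   0x0fff
#define VLAN_PRIO_SHIFT 13

int main(void)
{
        uint16_t vlan = 100, qos = 5;
        uint16_t tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);

        printf("tci = 0x%04x\n", tci);  /* prints 0xa064 */
        return 0;
}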
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
new file mode 100644
index 000000000000..d88d4dab170a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef SIENA_SRIOV_H
+#define SIENA_SRIOV_H
+
+#include "net_driver.h"
+
+/* On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest. The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value.
+ */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI \
+ ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
+ sizeof(efx_qword_t) / EFX_BUF_SIZE)
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+bool efx_siena_sriov_wanted(struct efx_nic *efx);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
+ u16 vlan, u8 qos);
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
+ struct ifla_vf_info *ivf);
+
+#ifdef CONFIG_SFC_SRIOV
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return efx->vf_init_count != 0;
+}
+#else /* !CONFIG_SFC_SRIOV */
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return false;
+}
+#endif /* CONFIG_SFC_SRIOV */
+
+void efx_siena_sriov_probe(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+
+#endif /* SIENA_SRIOV_H */
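The VI numbering described in the header comment above maps VF i to the half-open range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)). A standalone illustration using EFX_VI_BASE = 128 from the header and an assumed VI_SCALE of 2:

#include <stdio.h>

#define EFX_VI_BASE 128U        /* matches the value in siena_sriov.h */

int main(void)
{
        unsigned int vi_scale = 2;      /* example value; set by firmware */
        unsigned int vf = 3;
        unsigned int first = EFX_VI_BASE + (vf << vi_scale);
        unsigned int last = EFX_VI_BASE + ((vf + 1) << vi_scale) - 1;

        printf("VF %u owns VIs %u..%u\n", vf, first, last);     /* 140..143 */
        return 0;
}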
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
new file mode 100644
index 000000000000..816c44689e67
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "sriov.h"
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_mac)
+ return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_vlan) {
+ if ((vlan & ~VLAN_VID_MASK) ||
+ (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+ return -EINVAL;
+
+ return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_spoofchk)
+ return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_vf_config)
+ return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_link_state)
+ return efx->type->sriov_set_vf_link_state(efx, vf_i,
+ link_state);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_phys_port_id)
+ return efx->type->sriov_get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
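These wrappers resolve the net_device to an efx_nic and delegate to the optional efx_nic_type operations, returning -EOPNOTSUPP when a NIC generation does not provide one. The wiring into the netdev ops table lives outside this hunk; a sketch of how it would typically look (the field names are standard net_device_ops members, the struct name is illustrative):

/* Sketch only: not taken from this patch. */
static const struct net_device_ops efx_sriov_ndo_fragment = {
        .ndo_set_vf_mac         = efx_sriov_set_vf_mac,
        .ndo_set_vf_vlan        = efx_sriov_set_vf_vlan,
        .ndo_set_vf_spoofchk    = efx_sriov_set_vf_spoofchk,
        .ndo_get_vf_config      = efx_sriov_get_vf_config,
        .ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
        .ndo_get_phys_port_id   = efx_sriov_get_phys_port_id,
};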
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
new file mode 100644
index 000000000000..400df526586d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -0,0 +1,31 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SRIOV
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk);
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi);
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state);
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid);
+
+#endif /* CONFIG_SFC_SRIOV */
+
+#endif /* EFX_SRIOV_H */