Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c                  |   7
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c         |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c           | 334
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h           |  80
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c      | 108
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c           |   8
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c       |  31
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c                |  44
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h                |  22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c                 |   9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |  17
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  | 199
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c     |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile          |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c            | 959
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h            | 194
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c    | 178
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h    |  23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h        | 724
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c      |  20
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c        | 240
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h        |  19
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c                 |   2
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c           |   6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                  |   9
25 files changed, 2418 insertions, 823 deletions
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 48707ed76ffc..5b95bb48ce97 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
b44_enable_ints(bp);
}
@@ -1674,8 +1674,8 @@ static int b44_close(struct net_device *dev)
return 0;
}
-static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *nstat)
+static void b44_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct b44 *bp = netdev_priv(dev);
struct b44_hw_stats *hwstat = &bp->hw_stats;
@@ -1718,7 +1718,6 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
#endif
} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
- return nstat;
}
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
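
The two b44 hunks above track a kernel-wide API change: .ndo_get_stats64 now returns void instead of a struct rtnl_link_stats64 pointer, so drivers fill in the caller-provided structure and simply return. A minimal sketch of the new callback shape; my_priv and its counters are illustrative names, not from this patch:

static void my_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *nstat)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		nstat->rx_packets = priv->rx_packets;
		nstat->tx_packets = priv->tx_packets;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
	/* no return value: the core consumes *nstat directly */
}
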
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index c483618b57bd..50d88d3e03b6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
/* no more packets in rx/tx queues, remove the device from the
 * poll queue */
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
/* restore rx/tx interrupt */
enet_dmac_writel(priv, priv->dma_chan_int_mask,
@@ -817,7 +817,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev)
rx_pause_en = 1;
tx_pause_en = 1;
} else if (!priv->pause_auto) {
- /* pause setting overrided by user */
+ /* pause setting overridden by user */
rx_pause_en = priv->pause_rx;
tx_pause_en = priv->pause_tx;
} else {
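
The napi_complete() to napi_complete_done() conversions throughout this series all follow the same pattern: report how much work was actually done so the core can make better busy-polling and interrupt-coalescing decisions. A minimal sketch, where my_priv, my_process_rx() and my_enable_ints() are illustrative names:

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_process_rx(priv, budget);

	if (work_done < budget) {
		/* done for now: leave polling mode, re-arm interrupts */
		napi_complete_done(napi, work_done);
		my_enable_ints(priv);
	}
	return work_done;
}
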
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 744ed6ddaf37..a68d4889f5db 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv, \
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
+ * same layout, except it has been moved up by 4 bytes, *sigh*
+ */
+static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
+{
+ if (priv->is_lite && off >= RDMA_STATUS)
+ off += 4;
+ return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
+{
+ if (priv->is_lite && off >= RDMA_STATUS)
+ off += 4;
+ __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
+{
+ if (!priv->is_lite) {
+ return BIT(bit);
+ } else {
+ if (bit >= ACB_ALGO)
+ return BIT(bit + 1);
+ else
+ return BIT(bit);
+ }
+}
+
/* L2-interrupt masking/unmasking helpers; they automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot paths.
*/
@@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
reg = tdma_readl(priv, TDMA_CONTROL);
if (priv->tsb_en)
- reg |= TSB_EN;
+ reg |= tdma_control_bit(priv, TSB_EN);
else
- reg &= ~TSB_EN;
+ reg &= ~tdma_control_bit(priv, TSB_EN);
tdma_writel(priv, reg, TDMA_CONTROL);
return 0;
@@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
priv->msg_enable = enable;
}
+static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
+{
+ switch (type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_RXCHK:
+ case BCM_SYSPORT_STAT_RBUF:
+ case BCM_SYSPORT_STAT_SOFT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ const struct bcm_sysport_stats *s;
+ unsigned int i, j;
+
switch (string_set) {
case ETH_SS_STATS:
- return BCM_SYSPORT_STATS_LEN;
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ s = &bcm_sysport_gstrings_stats[i];
+ if (priv->is_lite &&
+ !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+ j++;
+ }
+ return j;
default:
return -EOPNOTSUPP;
}
@@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
static void bcm_sysport_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
- int i;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ const struct bcm_sysport_stats *s;
+ int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_sysport_gstrings_stats[i].stat_string,
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ s = &bcm_sysport_gstrings_stats[i];
+ if (priv->is_lite &&
+ !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+
+ memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
ETH_GSTRING_LEN);
+ j++;
}
break;
default:
@@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
case BCM_SYSPORT_STAT_MIB_RX:
case BCM_SYSPORT_STAT_MIB_TX:
case BCM_SYSPORT_STAT_RUNT:
+ if (priv->is_lite)
+ continue;
+
if (s->type != BCM_SYSPORT_STAT_MIB_RX)
offset = UMAC_MIB_STAT_OFFSET;
val = umac_readl(priv, UMAC_MIB_START + j + offset);
@@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- int i;
+ int i, j;
if (netif_running(dev))
bcm_sysport_update_mib_counters(priv);
- for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
const struct bcm_sysport_stats *s;
char *p;
@@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(unsigned long *)p;
+ data[j] = *(unsigned long *)p;
+ j++;
}
}
@@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
u16 len, status;
struct bcm_rsb *rsb;
- /* Determine how much we should process since last call */
- p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ /* Determine how much we should process since the last call; SYSTEMPORT
+ * Lite groups the producer and consumer indexes into the same 32-bit
+ * register, which we access using RDMA_CONS_INDEX
+ */
+ if (!priv->is_lite)
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ else
+ p_index = rdma_readl(priv, RDMA_CONS_INDEX);
p_index &= RDMA_PROD_INDEX_MASK;
if (p_index < priv->rx_c_index)
@@ -791,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
if (work_done == 0) {
napi_complete(napi);
/* re-enable TX interrupt */
- intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ if (!ring->priv->is_lite)
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ else
+ intrl2_0_mask_clear(ring->priv, BIT(ring->index +
+ INTRL2_0_TDMA_MBDONE_SHIFT));
return 0;
}
@@ -817,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
priv->rx_c_index += work_done;
priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
- rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+ /* SYSTEMPORT Lite groups the producer/consumer indexes; the producer
+ * is maintained by HW, but writes to it will be ignored while RDMA
+ * is active
+ */
+ if (!priv->is_lite)
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+ else
+ rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -848,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring, ring_bit;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -877,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
bcm_sysport_resume_from_wol(priv);
}
+ if (!priv->is_lite)
+ goto out;
+
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
+ if (!(priv->irq0_stat & ring_bit))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_0_mask_set(priv, ring_bit);
+ __napi_schedule(&txr->napi);
+ }
+ }
+out:
return IRQ_HANDLED;
}
@@ -930,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev)
bcm_sysport_rx_isr(priv->irq0, priv);
enable_irq(priv->irq0);
- disable_irq(priv->irq1);
- bcm_sysport_tx_isr(priv->irq1, priv);
- enable_irq(priv->irq1);
+ if (!priv->is_lite) {
+ disable_irq(priv->irq1);
+ bcm_sysport_tx_isr(priv->irq1, priv);
+ enable_irq(priv->irq1);
+ }
}
#endif
@@ -1129,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
priv->old_duplex = phydev->duplex;
}
+ if (priv->is_lite)
+ goto out;
+
switch (phydev->speed) {
case SPEED_2500:
cmd_bits = CMD_SPEED_2500;
@@ -1169,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
reg |= cmd_bits;
umac_writel(priv, reg, UMAC_CMD);
}
-
- phy_print_status(phydev);
+out:
+ if (changed)
+ phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1315,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
reg = tdma_readl(priv, TDMA_CONTROL);
if (enable)
- reg |= TDMA_EN;
+ reg |= tdma_control_bit(priv, TDMA_EN);
else
- reg &= ~TDMA_EN;
+ reg &= ~tdma_control_bit(priv, TDMA_EN);
tdma_writel(priv, reg, TDMA_CONTROL);
/* Poll for TDMA disabling completion */
@@ -1342,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
int i;
/* Initialize SW view of the RX ring */
- priv->num_rx_bds = NUM_RX_DESC;
+ priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
@@ -1379,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
rdma_writel(priv, 0, RDMA_START_ADDR_HI);
rdma_writel(priv, 0, RDMA_START_ADDR_LO);
rdma_writel(priv, 0, RDMA_END_ADDR_HI);
- rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+ rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
rdma_writel(priv, 1, RDMA_MBDONE_INTR);
@@ -1421,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
+ if (priv->is_lite)
+ return;
+
reg = umac_readl(priv, UMAC_CMD);
if (dev->flags & IFF_PROMISC)
reg |= CMD_PROMISC;
@@ -1438,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
{
u32 reg;
- reg = umac_readl(priv, UMAC_CMD);
- if (enable)
- reg |= mask;
- else
- reg &= ~mask;
- umac_writel(priv, reg, UMAC_CMD);
+ if (!priv->is_lite) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ umac_writel(priv, reg, UMAC_CMD);
+ } else {
+ reg = gib_readl(priv, GIB_CONTROL);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ gib_writel(priv, reg, GIB_CONTROL);
+ }
/* UniMAC stops on a packet boundary, wait for a full-sized packet
* to be processed (1 msec).
@@ -1456,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
{
u32 reg;
+ if (priv->is_lite)
+ return;
+
reg = umac_readl(priv, UMAC_CMD);
reg |= CMD_SW_RESET;
umac_writel(priv, reg, UMAC_CMD);
@@ -1468,9 +1589,17 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
unsigned char *addr)
{
- umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
- (addr[2] << 8) | addr[3], UMAC_MAC0);
- umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+ u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+ addr[3];
+ u32 mac1 = (addr[4] << 8) | addr[5];
+
+ if (!priv->is_lite) {
+ umac_writel(priv, mac0, UMAC_MAC0);
+ umac_writel(priv, mac1, UMAC_MAC1);
+ } else {
+ gib_writel(priv, mac0, GIB_MAC0);
+ gib_writel(priv, mac1, GIB_MAC1);
+ }
}
static void topctrl_flush(struct bcm_sysport_priv *priv)
@@ -1515,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev)
phy_start(dev->phydev);
- /* Enable TX interrupts for the 32 TXQs */
- intrl2_1_mask_clear(priv, 0xffffffff);
+ /* Enable TX interrupts for the TXQs */
+ if (!priv->is_lite)
+ intrl2_1_mask_clear(priv, 0xffffffff);
+ else
+ intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
/* Last call before we start the real business */
netif_tx_start_all_queues(dev);
@@ -1528,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
reg = rbuf_readl(priv, RBUF_CONTROL);
reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ /* Set a correct RSB format on SYSTEMPORT Lite */
+ if (priv->is_lite) {
+ reg &= ~RBUF_RSB_SWAP1;
+ reg |= RBUF_RSB_SWAP0;
+ }
rbuf_writel(priv, reg, RBUF_CONTROL);
}
+static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
+{
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ if (!priv->is_lite) {
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ }
+}
+
+static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
+{
+ u32 __maybe_unused reg;
+
+ /* Include Broadcom tag in pad extension */
+ if (netdev_uses_dsa(priv->netdev)) {
+ reg = gib_readl(priv, GIB_CONTROL);
+ reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
+ reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
+ gib_writel(priv, reg, GIB_CONTROL);
+ }
+}
+
static int bcm_sysport_open(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1551,13 +1711,20 @@ static int bcm_sysport_open(struct net_device *dev)
rbuf_init(priv);
/* Set maximum frame length */
- umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ if (!priv->is_lite)
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ else
+ gib_set_pad_extension(priv);
/* Set MAC address */
umac_set_hw_addr(priv, dev->dev_addr);
/* Read CRC forward */
- priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ if (!priv->is_lite)
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ else
+ priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
+ GIB_FCS_STRIP);
phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
0, priv->phy_interface);
@@ -1572,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev)
priv->old_pause = -1;
/* mask all interrupts and request them */
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ bcm_sysport_mask_all_intrs(priv);
ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
if (ret) {
@@ -1585,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev)
goto out_phy_disconnect;
}
- ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
- if (ret) {
- netdev_err(dev, "failed to request TX interrupt\n");
- goto out_free_irq0;
+ if (!priv->is_lite) {
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
+ dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
}
/* Initialize both hardware and software ring */
@@ -1635,7 +1800,8 @@ out_free_rx_ring:
out_free_tx_ring:
for (i = 0; i < dev->num_tx_queues; i++)
bcm_sysport_fini_tx_ring(priv, i);
- free_irq(priv->irq1, dev);
+ if (!priv->is_lite)
+ free_irq(priv->irq1, dev);
out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
@@ -1653,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
phy_stop(dev->phydev);
/* mask all interrupts */
- intrl2_0_mask_set(priv, 0xffffffff);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_mask_set(priv, 0xffffffff);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ bcm_sysport_mask_all_intrs(priv);
}
static int bcm_sysport_stop(struct net_device *dev)
@@ -1694,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev)
bcm_sysport_fini_rx_ring(priv);
free_irq(priv->irq0, dev);
- free_irq(priv->irq1, dev);
+ if (!priv->is_lite)
+ free_irq(priv->irq1, dev);
/* Disconnect from PHY */
phy_disconnect(dev->phydev);
@@ -1733,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
#define REV_FMT "v%2x.%02x"
+static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
+ [SYSTEMPORT] = {
+ .is_lite = false,
+ .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
+ },
+ [SYSTEMPORT_LITE] = {
+ .is_lite = true,
+ .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
+ },
+};
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemportlite-v1.00",
+ .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
+ { .compatible = "brcm,systemport-v1.00",
+ .data = &bcm_sysport_params[SYSTEMPORT] },
+ { .compatible = "brcm,systemport",
+ .data = &bcm_sysport_params[SYSTEMPORT] },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
+
static int bcm_sysport_probe(struct platform_device *pdev)
{
+ const struct bcm_sysport_hw_params *params;
+ const struct of_device_id *of_id = NULL;
struct bcm_sysport_priv *priv;
struct device_node *dn;
struct net_device *dev;
@@ -1745,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dn = pdev->dev.of_node;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ of_id = of_match_node(bcm_sysport_of_match, dn);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ /* We need to know fairly early on which type of adapter we have */
+ params = of_id->data;
/* Read the Transmit/Receive Queue properties */
if (of_property_read_u32(dn, "systemport,num-txq", &txq))
@@ -1752,6 +1946,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
rxq = 1;
+ /* Sanity check the number of transmit queues */
+ if (!txq || txq > TDMA_NUM_RINGS)
+ return -EINVAL;
+
dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
if (!dev)
return -ENOMEM;
@@ -1759,10 +1957,21 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize private members */
priv = netdev_priv(dev);
+ /* Allocate the requested number of TX rings */
+ priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
+ sizeof(struct bcm_sysport_tx_ring),
+ GFP_KERNEL);
+ if (!priv->tx_rings)
+ return -ENOMEM;
+
+ priv->is_lite = params->is_lite;
+ priv->num_rx_desc_words = params->num_rx_desc_words;
+
priv->irq0 = platform_get_irq(pdev, 0);
- priv->irq1 = platform_get_irq(pdev, 1);
+ if (!priv->is_lite)
+ priv->irq1 = platform_get_irq(pdev, 1);
priv->wol_irq = platform_get_irq(pdev, 2);
- if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
dev_err(&pdev->dev, "invalid interrupts\n");
ret = -EINVAL;
goto err_free_netdev;
@@ -1836,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
- "Broadcom SYSTEMPORT" REV_FMT
+ "Broadcom SYSTEMPORT%s" REV_FMT
" at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ priv->is_lite ? " Lite" : "",
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
priv->base, priv->irq0, priv->irq1, txq, rxq);
@@ -2033,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d)
rbuf_init(priv);
/* Set maximum frame length */
- umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ if (!priv->is_lite)
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ else
+ gib_set_pad_extension(priv);
/* Set MAC address */
umac_set_hw_addr(priv, dev->dev_addr);
@@ -2069,13 +2282,6 @@ out_free_tx_rings:
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
bcm_sysport_suspend, bcm_sysport_resume);
-static const struct of_device_id bcm_sysport_of_match[] = {
- { .compatible = "brcm,systemport-v1.00" },
- { .compatible = "brcm,systemport" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
-
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
.remove = bcm_sysport_remove,
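
One detail worth spelling out from the RX path changes above: on SYSTEMPORT Lite the producer and consumer indexes share a single 32-bit register, which is why bcm_sysport_poll() writes priv->rx_c_index << 16. A sketch of reading both fields back; the 16-bit split is inferred from the shifts and masks in this patch:

	u32 reg = rdma_readl(priv, RDMA_CONS_INDEX);
	u16 p_index = reg & RDMA_PROD_INDEX_MASK;	/* producer, low half */
	u16 c_index = (reg >> 16) & RDMA_CONS_INDEX_MASK; /* consumer, high half */
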
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 1c82e3da69a7..863ddd7870b7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -127,6 +127,10 @@ struct bcm_rsb {
#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+/* SYSTEMPORT Lite groups the TX queue interrupts on instance 0 */
+#define INTRL2_0_TDMA_MBDONE_SHIFT 12
+#define INTRL2_0_TDMA_MBDONE_MASK (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT)
+
/* RXCHK offset and defines */
#define SYS_PORT_RXCHK_OFFSET 0x300
@@ -176,7 +180,9 @@ struct bcm_rsb {
#define RBUF_OK_TO_SEND_MASK 0xff
#define RBUF_CRC_REPLACE (1 << 20)
#define RBUF_OK_TO_SEND_MODE (1 << 21)
-#define RBUF_RSB_SWAP (1 << 22)
+/* SYSTEMPORT Lite uses two bits here */
+#define RBUF_RSB_SWAP0 (1 << 22)
+#define RBUF_RSB_SWAP1 (1 << 23)
#define RBUF_ACPI_EN (1 << 23)
#define RBUF_PKT_RDY_THRESH 0x04
@@ -247,6 +253,7 @@ struct bcm_rsb {
#define MIB_RUNT_CNT_RST (1 << 1)
#define MIB_TX_CNT_RST (1 << 2)
+/* These offsets are valid for SYSTEMPORT and SYSTEMPORT Lite */
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
#define MSEQ_LEN_SHIFT 16
@@ -258,6 +265,34 @@ struct bcm_rsb {
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
+/* Only valid on SYSTEMPORT Lite */
+#define SYS_PORT_GIB_OFFSET 0x1000
+
+#define GIB_CONTROL 0x00
+#define GIB_TX_EN (1 << 0)
+#define GIB_RX_EN (1 << 1)
+#define GIB_TX_FLUSH (1 << 2)
+#define GIB_RX_FLUSH (1 << 3)
+#define GIB_GTX_CLK_SEL_SHIFT 4
+#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_FCS_STRIP (1 << 6)
+#define GIB_LCL_LOOP_EN (1 << 7)
+#define GIB_LCL_LOOP_TXEN (1 << 8)
+#define GIB_RMT_LOOP_EN (1 << 9)
+#define GIB_RMT_LOOP_RXEN (1 << 10)
+#define GIB_RX_PAUSE_EN (1 << 11)
+#define GIB_PREAMBLE_LEN_SHIFT 12
+#define GIB_PREAMBLE_LEN_MASK 0xf
+#define GIB_IPG_LEN_SHIFT 16
+#define GIB_IPG_LEN_MASK 0x3f
+#define GIB_PAD_EXTENSION_SHIFT 22
+#define GIB_PAD_EXTENSION_MASK 0x3f
+
+#define GIB_MAC1 0x08
+#define GIB_MAC0 0x0c
+
/* Receive DMA offset and defines */
#define SYS_PORT_RDMA_OFFSET 0x2000
@@ -409,16 +444,19 @@ struct bcm_rsb {
RING_PCP_DEI_VID)
#define TDMA_CONTROL 0x600
-#define TDMA_EN (1 << 0)
-#define TSB_EN (1 << 1)
-#define TSB_SWAP (1 << 2)
-#define ACB_ALGO (1 << 3)
+#define TDMA_EN 0
+#define TSB_EN 1
+/* TSB_SWAP uses 2 bits on SYSTEMPORT Lite, shifting everything from
+ * ACB_ALGO upward by 1 bit; we keep the SYSTEMPORT layout here and
+ * adjust with tdma_control_bit()
+ */
+#define TSB_SWAP 2
+#define ACB_ALGO 3
#define BUF_DATA_OFFSET_SHIFT 4
#define BUF_DATA_OFFSET_MASK 0x3ff
-#define VLAN_EN (1 << 14)
-#define SW_BRCM_TAG (1 << 15)
-#define WNC_KPT_SIZE_UPDATE (1 << 16)
-#define SYNC_PKT_SIZE (1 << 17)
+#define VLAN_EN 14
+#define SW_BRCM_TAG 15
+#define WNC_KPT_SIZE_UPDATE 16
+#define SYNC_PKT_SIZE 17
#define ACH_TXDONE_DELAY_SHIFT 18
#define ACH_TXDONE_DELAY_MASK 0xff
@@ -475,12 +513,12 @@ struct dma_desc {
};
/* Number of Receive hardware descriptor words */
-#define NUM_HW_RX_DESC_WORDS 1024
-/* Real number of usable descriptors */
-#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+#define SP_NUM_HW_RX_DESC_WORDS 1024
+#define SP_LT_NUM_HW_RX_DESC_WORDS 256
-/* Internal linked-list RAM has up to 1536 entries */
-#define NUM_TX_DESC 1536
+/* Internal linked-list RAM size */
+#define SP_NUM_TX_DESC 1536
+#define SP_LT_NUM_TX_DESC 256
#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
@@ -627,6 +665,16 @@ struct bcm_sysport_cb {
DEFINE_DMA_UNMAP_LEN(dma_len);
};
+enum bcm_sysport_type {
+ SYSTEMPORT = 0,
+ SYSTEMPORT_LITE,
+};
+
+struct bcm_sysport_hw_params {
+ bool is_lite;
+ unsigned int num_rx_desc_words;
+};
+
/* Software view of the TX ring */
struct bcm_sysport_tx_ring {
spinlock_t lock; /* Ring lock for tx reclaim/xmit */
@@ -651,6 +699,8 @@ struct bcm_sysport_priv {
u32 irq0_mask;
u32 irq1_stat;
u32 irq1_mask;
+ bool is_lite;
+ unsigned int num_rx_desc_words;
struct napi_struct napi ____cacheline_aligned;
struct net_device *netdev;
struct platform_device *pdev;
@@ -659,7 +709,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
- struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+ struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
void __iomem *rx_bds;
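
With the TDMA_CONTROL fields above redefined as bit positions rather than masks, every read-modify-write goes through tdma_control_bit() so the SYSTEMPORT Lite +1 shift (for bits from ACB_ALGO upward) is applied in a single place. A usage sketch, mirroring tdma_enable_set() in the .c file:

	u32 reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, TDMA_EN);	/* BIT(0) on both variants */
	tdma_writel(priv, reg, TDMA_CONTROL);
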
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 7c19c8e2bf91..6ce80cbcb48e 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -12,11 +12,6 @@
#include <linux/brcmphy.h>
#include "bgmac.h"
-struct bcma_mdio {
- struct bcma_device *core;
- u8 phyaddr;
-};
-
static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
u32 value, int timeout)
{
@@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
* PHY ops
**************************************************/
-static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
+static u16 bcma_mdio_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
struct bcma_device *core;
u16 phy_access_addr;
@@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
- if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
} else {
- core = bcma_mdio->core;
+ core = bgmac->bcma.core;
phy_access_addr = BGMAC_PHY_ACCESS;
phy_ctl_addr = BGMAC_PHY_CNTL;
}
@@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
+static int bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg,
u16 value)
{
struct bcma_device *core;
@@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
u16 phy_ctl_addr;
u32 tmp;
- if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
} else {
- core = bcma_mdio->core;
+ core = bgmac->bcma.core;
phy_access_addr = BGMAC_PHY_ACCESS;
phy_ctl_addr = BGMAC_PHY_CNTL;
}
@@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
tmp |= phyaddr;
bcma_write32(core, phy_ctl_addr, tmp);
- bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
- if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
dev_warn(&core->dev, "Error setting MDIO int\n");
tmp = BGMAC_PA_START;
@@ -132,57 +127,67 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
-static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio)
+static void bcma_mdio_phy_init(struct bgmac *bgmac)
{
- struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo;
+ struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo;
u8 i;
+ /* For some legacy hardware we do chipset-based PHY initialization here
+ * without even detecting the PHY ID. It's hacky and should be cleaned up as
+ * soon as someone can test it.
+ */
if (ci->id == BCMA_CHIP_ID_BCM5356) {
for (i = 0; i < 5; i++) {
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b);
- bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b);
+ bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
}
+ return;
}
if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
(ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
(ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
- struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc;
+ struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
for (i = 0; i < 5; i++) {
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
}
+ return;
}
+
+ /* For all other hardware, do the initialization via the PHY subsystem. */
+ if (bgmac->net_dev && bgmac->net_dev->phydev)
+ phy_init_hw(bgmac->net_dev->phydev);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static int bcma_mdio_phy_reset(struct mii_bus *bus)
{
- struct bcma_mdio *bcma_mdio = bus->priv;
- u8 phyaddr = bcma_mdio->phyaddr;
+ struct bgmac *bgmac = bus->priv;
+ u8 phyaddr = bgmac->phyaddr;
- if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS)
+ if (phyaddr == BGMAC_PHY_NOREGS)
return 0;
- bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET);
+ bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET);
udelay(100);
- if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET)
- dev_err(&bcma_mdio->core->dev, "PHY reset failed\n");
- bcma_mdio_phy_init(bcma_mdio);
+ if (bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET)
+ dev_err(bgmac->dev, "PHY reset failed\n");
+ bcma_mdio_phy_init(bgmac);
return 0;
}
@@ -202,16 +207,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum,
return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value);
}
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
{
- struct bcma_mdio *bcma_mdio;
+ struct bcma_device *core = bgmac->bcma.core;
struct mii_bus *mii_bus;
int err;
- bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL);
- if (!bcma_mdio)
- return ERR_PTR(-ENOMEM);
-
mii_bus = mdiobus_alloc();
if (!mii_bus) {
err = -ENOMEM;
@@ -221,15 +222,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
mii_bus->name = "bcma_mdio mii bus";
sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num,
core->core_unit);
- mii_bus->priv = bcma_mdio;
+ mii_bus->priv = bgmac;
mii_bus->read = bcma_mdio_mii_read;
mii_bus->write = bcma_mdio_mii_write;
mii_bus->reset = bcma_mdio_phy_reset;
mii_bus->parent = &core->dev;
- mii_bus->phy_mask = ~(1 << phyaddr);
-
- bcma_mdio->core = core;
- bcma_mdio->phyaddr = phyaddr;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
err = mdiobus_register(mii_bus);
if (err) {
@@ -242,23 +240,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
err_free_bus:
mdiobus_free(mii_bus);
err:
- kfree(bcma_mdio);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bcma_mdio_mii_register);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
{
- struct bcma_mdio *bcma_mdio;
-
if (!mii_bus)
return;
- bcma_mdio = mii_bus->priv;
-
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
- kfree(bcma_mdio);
}
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
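
The bcma_mdio glue above no longer allocates its own wrapper struct: the MII bus now carries the bgmac itself in mii_bus->priv, removing a kzalloc/kfree pair and giving the PHY callbacks direct access to bgmac state. The reshaped registration flow, in sketch form with error handling elided:

	mii_bus = mdiobus_alloc();
	mii_bus->priv = bgmac;			/* was: a private bcma_mdio wrapper */
	mii_bus->read = bcma_mdio_mii_read;
	mii_bus->write = bcma_mdio_mii_write;
	mii_bus->reset = bcma_mdio_phy_reset;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
	err = mdiobus_register(mii_bus);
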
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 4a4ffc0c4c65..d59cfcc4c4d5 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core)
u8 *mac;
int err;
- bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
+ bgmac = bgmac_alloc(&core->dev);
if (!bgmac)
return -ENOMEM;
bgmac->bcma.core = core;
- bgmac->dev = &core->dev;
bgmac->dma_dev = core->dma_dev;
bgmac->irq = core->irq;
@@ -145,7 +144,7 @@ static int bgmac_probe(struct bcma_device *core)
goto err;
}
- ether_addr_copy(bgmac->mac_addr, mac);
+ ether_addr_copy(bgmac->net_dev->dev_addr, mac);
/* On BCM4706 we need common core to access PHY */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
@@ -178,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core)
if (!bgmac_is_bcm4707_family(core) &&
!(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
- mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
+ mii_bus = bcma_mdio_mii_register(bgmac);
if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
goto err;
@@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core)
err1:
bcma_mdio_mii_unregister(bgmac->mii_bus);
err:
- kfree(bgmac);
bcma_set_drvdata(core, NULL);
return err;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6f736c19872f..da1b8b225eb9 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -51,8 +51,7 @@ static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
{
- if ((bgmac_idm_read(bgmac, BCMA_IOCTL) &
- (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK)
+ if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
return false;
if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
return false;
@@ -61,15 +60,25 @@ static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
{
- bgmac_idm_write(bgmac, BCMA_IOCTL,
- (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
- bgmac_idm_read(bgmac, BCMA_IOCTL);
+ u32 val;
- bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
- bgmac_idm_read(bgmac, BCMA_RESET_CTL);
- udelay(1);
+ /* The Reset Control register only contains a single bit to show if the
+ * controller is currently in reset. Do a sanity check here, just in
+ * case the bootloader happened to leave the device in reset.
+ */
+ val = bgmac_idm_read(bgmac, BCMA_RESET_CTL);
+ if (val) {
+ bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
+ bgmac_idm_read(bgmac, BCMA_RESET_CTL);
+ udelay(1);
+ }
- bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
+ val = bgmac_idm_read(bgmac, BCMA_IOCTL);
+ /* Some bits of BCMA_IOCTL are set by HW/ATF and should not change */
+ val |= flags & ~(BGMAC_AWCACHE | BGMAC_ARCACHE | BGMAC_AWUSER |
+ BGMAC_ARUSER);
+ val |= BGMAC_CLK_EN;
+ bgmac_idm_write(bgmac, BCMA_IOCTL, val);
bgmac_idm_read(bgmac, BCMA_IOCTL);
udelay(1);
}
@@ -151,7 +160,7 @@ static int bgmac_probe(struct platform_device *pdev)
struct resource *regs;
const u8 *mac_addr;
- bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL);
+ bgmac = bgmac_alloc(&pdev->dev);
if (!bgmac)
return -ENOMEM;
@@ -169,7 +178,7 @@ static int bgmac_probe(struct platform_device *pdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- ether_addr_copy(bgmac->mac_addr, mac_addr);
+ ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr);
else
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0e066dc6b8cc..fd66fca00e01 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -12,6 +12,8 @@
#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/bcm47xx_nvram.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include "bgmac.h"
static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
@@ -1148,7 +1150,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
return weight;
if (handled < weight) {
- napi_complete(napi);
+ napi_complete_done(napi, handled);
bgmac_chip_intrs_on(bgmac);
}
@@ -1221,12 +1223,16 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
struct bgmac *bgmac = netdev_priv(net_dev);
+ struct sockaddr *sa = addr;
int ret;
ret = eth_prepare_mac_addr_change(net_dev, addr);
if (ret < 0)
return ret;
- bgmac_write_mac_address(bgmac, (u8 *)addr);
+
+ ether_addr_copy(net_dev->dev_addr, sa->sa_data);
+ bgmac_write_mac_address(bgmac, net_dev->dev_addr);
+
eth_commit_mac_addr_change(net_dev, addr);
return 0;
}
@@ -1446,33 +1452,42 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
-int bgmac_enet_probe(struct bgmac *info)
+struct bgmac *bgmac_alloc(struct device *dev)
{
struct net_device *net_dev;
struct bgmac *bgmac;
- int err;
/* Allocation and references */
- net_dev = alloc_etherdev(sizeof(*bgmac));
+ net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
if (!net_dev)
- return -ENOMEM;
+ return NULL;
net_dev->netdev_ops = &bgmac_netdev_ops;
net_dev->ethtool_ops = &bgmac_ethtool_ops;
+
bgmac = netdev_priv(net_dev);
- memcpy(bgmac, info, sizeof(*bgmac));
+ bgmac->dev = dev;
bgmac->net_dev = net_dev;
+
+ return bgmac;
+}
+EXPORT_SYMBOL_GPL(bgmac_alloc);
+
+int bgmac_enet_probe(struct bgmac *bgmac)
+{
+ struct net_device *net_dev = bgmac->net_dev;
+ int err;
+
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
- if (!is_valid_ether_addr(bgmac->mac_addr)) {
+ if (!is_valid_ether_addr(net_dev->dev_addr)) {
dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
- bgmac->mac_addr);
- eth_random_addr(bgmac->mac_addr);
+ net_dev->dev_addr);
+ eth_hw_addr_random(net_dev);
dev_warn(bgmac->dev, "Using random MAC: %pM\n",
- bgmac->mac_addr);
+ net_dev->dev_addr);
}
- ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);
/* This (reset &) enable is not present in specs or the reference driver but
* Broadcom does it in arch PCI code when enabling fake PCI device.
@@ -1488,7 +1503,7 @@ int bgmac_enet_probe(struct bgmac *info)
err = bgmac_dma_alloc(bgmac);
if (err) {
dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
- goto err_netdev_free;
+ goto err_out;
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
@@ -1521,8 +1536,7 @@ err_phy_disconnect:
phy_disconnect(net_dev->phydev);
err_dma_free:
bgmac_dma_free(bgmac);
-err_netdev_free:
- free_netdev(net_dev);
+err_out:
return err;
}
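
The bgmac core is now probed in two phases: the bus glue (bcma or platform) calls bgmac_alloc() to get a devm-managed netdev with the bgmac embedded, fills in the bus specifics, and then hands it to bgmac_enet_probe(). Because the netdev is devm-allocated, error paths no longer free it by hand. A sketch of a caller, with the bus-specific fields elided:

	bgmac = bgmac_alloc(&pdev->dev);
	if (!bgmac)
		return -ENOMEM;
	/* ... fill bgmac->dma_dev, bgmac->irq, dev_addr, callbacks ... */
	err = bgmac_enet_probe(bgmac);
	if (err)
		return err;	/* netdev is freed automatically via devm */
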
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 71f493f2451f..6d1c6ff1ed96 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -213,6 +213,22 @@
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
+/* The IOCTL values appear to be different in NS, NSP, and NS2, and do not match
+ * the values directly above
+ */
+#define BGMAC_CLK_EN BIT(0)
+#define BGMAC_RESERVED_0 BIT(1)
+#define BGMAC_SOURCE_SYNC_MODE_EN BIT(2)
+#define BGMAC_DEST_SYNC_MODE_EN BIT(3)
+#define BGMAC_TX_CLK_OUT_INVERT_EN BIT(4)
+#define BGMAC_DIRECT_GMII_MODE BIT(5)
+#define BGMAC_CLK_250_SEL BIT(6)
+#define BGMAC_AWCACHE (0xf << 7)
+#define BGMAC_RESERVED_1 (0x1f << 11)
+#define BGMAC_ARCACHE (0xf << 16)
+#define BGMAC_AWUSER (0x3f << 20)
+#define BGMAC_ARUSER (0x3f << 26)
+#define BGMAC_RESERVED BIT(31)
/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
#define BGMAC_BCMA_IOST_ATTACHED 0x00000800
@@ -474,7 +490,6 @@ struct bgmac {
struct device *dev;
struct device *dma_dev;
- unsigned char mac_addr[ETH_ALEN];
u32 feature_flags;
struct net_device *net_dev;
@@ -517,12 +532,13 @@ struct bgmac {
int (*phy_connect)(struct bgmac *bgmac);
};
-int bgmac_enet_probe(struct bgmac *info);
+struct bgmac *bgmac_alloc(struct device *dev);
+int bgmac_enet_probe(struct bgmac *bgmac);
void bgmac_enet_remove(struct bgmac *bgmac);
void bgmac_adjust_link(struct net_device *net_dev);
int bgmac_phy_connect_direct(struct bgmac *bgmac);
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr);
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d5d1026be4b7..e3af1f3cb61f 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_fast_work(bnapi))) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
bnapi->last_status_idx);
@@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_work(bnapi))) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
@@ -6821,13 +6821,13 @@ bnx2_save_stats(struct bnx2 *bp)
(unsigned long) (bp->stats_blk->ctr + \
bp->temp_stats_blk->ctr)
-static struct rtnl_link_stats64 *
+static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
struct bnx2 *bp = netdev_priv(dev);
if (bp->stats_blk == NULL)
- return net_stats;
+ return;
net_stats->rx_packets =
GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
@@ -6891,7 +6891,6 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
GET_32BIT_NET_STATS(stat_FwRxDrop);
- return net_stats;
}
/* All ethtool functions called with rtnl_lock */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 3e199d3e461e..9e8c06130c09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
-
- /* put page reference used by the memory pool, since we
- * won't be using this page as the mempool anymore.
- */
- if (pool->page)
- put_page(pool->page);
-
+ if (!pool->page) {
pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
if (unlikely(!pool->page))
return -ENOMEM;
@@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return -ENOMEM;
}
- get_page(pool->page);
sw_buf->page = pool->page;
sw_buf->offset = pool->offset;
@@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
pool->offset += SGE_PAGE_SIZE;
-
+ if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
+ get_page(pool->page);
+ else
+ pool->page = NULL;
return 0;
}
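
The bnx2x_alloc_rx_sge() rework above changes when the page-pool reference is taken: instead of grabbing an extra reference for every fragment up front and dropping one whenever the page is retired, the code now takes a reference only if the pool will keep using the page for another SGE, and simply forgets the page once it is fully consumed. The decisive lines, restated:

	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
		get_page(pool->page);	/* pool keeps a reference for the next SGE */
	else
		pool->page = NULL;	/* last user owns the remaining reference */
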
@@ -3229,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
* has been updated when NAPI was scheduled.
*/
if (IS_FCOE_FP(fp)) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
} else {
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 5f19427c7b27..43423744fdfa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
return port_type;
}
-static int bnx2x_get_vf_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bnx2x_get_vf_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (bp->state == BNX2X_STATE_OPEN) {
if (test_bit(BNX2X_LINK_REPORT_FD,
&bp->vf_link_vars.link_report_flags))
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
- ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
+ cmd->base.speed = bp->vf_link_vars.line_speed;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
}
- cmd->port = PORT_OTHER;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
return 0;
}
-static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
u32 media_type;
+ u32 supported, advertising, lp_advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&lp_advertising,
+ cmd->link_modes.lp_advertising);
/* Dual Media boards present all available port types */
- cmd->supported = bp->port.supported[cfg_idx] |
+ supported = bp->port.supported[cfg_idx] |
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
- cmd->advertising = bp->port.advertising[cfg_idx];
+ advertising = bp->port.advertising[cfg_idx];
media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
if (media_type == ETH_PHY_SFP_1G_FIBER) {
- cmd->supported &= ~(SUPPORTED_10000baseT_Full);
- cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ supported &= ~(SUPPORTED_10000baseT_Full);
+ advertising &= ~(ADVERTISED_10000baseT_Full);
}
if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
!(bp->flags & MF_FUNC_DIS)) {
- cmd->duplex = bp->link_vars.duplex;
+ cmd->base.duplex = bp->link_vars.duplex;
if (IS_MF(bp) && !BP_NOMCP(bp))
- ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+ cmd->base.speed = bnx2x_get_mf_speed(bp);
else
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->base.speed = bp->link_vars.line_speed;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
}
- cmd->port = bnx2x_get_port_type(bp);
+ cmd->base.port = bnx2x_get_port_type(bp);
- cmd->phy_address = bp->mdio.prtad;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.phy_address = bp->mdio.prtad;
if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* Publish LP advertised speeds and FC */
if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
u32 status = bp->link_vars.link_status;
- cmd->lp_advertising |= ADVERTISED_Autoneg;
+ lp_advertising |= ADVERTISED_Autoneg;
if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Pause;
+ lp_advertising |= ADVERTISED_Pause;
if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+ lp_advertising |= ADVERTISED_Asym_Pause;
if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+ lp_advertising |= ADVERTISED_10baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+ lp_advertising |= ADVERTISED_10baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+ lp_advertising |= ADVERTISED_100baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+ lp_advertising |= ADVERTISED_100baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+ lp_advertising |= ADVERTISED_1000baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
if (media_type == ETH_PHY_KR) {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_1000baseKX_Full;
} else {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_1000baseT_Full;
}
}
if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
+ lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
if (media_type == ETH_PHY_KR) {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_10000baseKR_Full;
} else {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_10000baseT_Full;
}
}
if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
+ lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
return 0;
}
-static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
u32 speed, phy_idx;
+ u32 supported;
+ u8 duplex = cmd->base.duplex;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (IS_MF_SD(bp))
return 0;
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
/* If we received a request for an unknown duplex, assume full */
- if (cmd->duplex == DUPLEX_UNKNOWN)
- cmd->duplex = DUPLEX_FULL;
+ if (duplex == DUPLEX_UNKNOWN)
+ duplex = DUPLEX_FULL;
if (IS_MF_SI(bp)) {
u32 part;
@@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- if (cmd->port != bnx2x_get_port_type(bp)) {
- switch (cmd->port) {
+ if (cmd->base.port != bnx2x_get_port_type(bp)) {
+ switch (cmd->base.port) {
case PORT_TP:
if (!(bp->port.supported[0] & SUPPORTED_TP ||
bp->port.supported[1] & SUPPORTED_TP)) {
@@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->link_params.multi_phy_config = old_multi_phy_config;
DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 an_supported_speed = bp->port.supported[cfg_idx];
if (bp->link_params.phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
@@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
/* advertise the requested speed and duplex if supported */
- if (cmd->advertising & ~an_supported_speed) {
+ if (advertising & ~an_supported_speed) {
DP(BNX2X_MSG_ETHTOOL,
"Advertisement parameters are not supported\n");
return -EINVAL;
}
bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
- bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
- cmd->advertising);
- if (cmd->advertising) {
+ advertising);
+ if (advertising) {
bp->link_params.speed_cap_mask[cfg_idx] = 0;
- if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ if (advertising & ADVERTISED_10baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
}
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ if (advertising & ADVERTISED_100baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
}
- if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ if (advertising & ADVERTISED_1000baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
}
- if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ if (advertising & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseKX_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
- if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ if (advertising & (ADVERTISED_10000baseT_Full |
ADVERTISED_10000baseKX4_Full |
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
- if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+ if (advertising & ADVERTISED_20000baseKR2_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
@@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
/* advertise the requested speed and duplex if supported */
switch (speed) {
case SPEED_10:
- if (cmd->duplex == DUPLEX_FULL) {
+ if (duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Full)) {
DP(BNX2X_MSG_ETHTOOL,
@@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_100:
- if (cmd->duplex == DUPLEX_FULL) {
+ if (duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Full)) {
DP(BNX2X_MSG_ETHTOOL,
@@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_1000:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"1G half not supported\n");
return -EINVAL;
@@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_2500:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"2.5G half not supported\n");
return -EINVAL;
@@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_10000:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"10G half not supported\n");
return -EINVAL;
@@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
bp->link_params.req_line_speed[cfg_idx] = speed;
- bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
bp->port.advertising[cfg_idx] = advertising;
}
@@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev,
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
- .get_settings = bnx2x_get_settings,
- .set_settings = bnx2x_set_settings,
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
@@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_eee = bnx2x_get_eee,
.set_eee = bnx2x_set_eee,
.get_ts_info = bnx2x_get_ts_info,
+ .get_link_ksettings = bnx2x_get_link_ksettings,
+ .set_link_ksettings = bnx2x_set_link_ksettings,
};
static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
- .get_settings = bnx2x_get_vf_settings,
.get_drvinfo = bnx2x_get_drvinfo,
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
@@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
+ .get_link_ksettings = bnx2x_get_vf_link_ksettings,
};
void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 05356efdbf93..b209b7f6093e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6957,7 +6957,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
* hence its link is expected to be down
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
- * - DEFAULT should be overriden during initialiazation
+ * - DEFAULT should be overridden during initialization
*/
DP(NETIF_MSG_LINK, "Invalid link indication "
"mpc=0x%x. DISABLING LINK !!!\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 6082ed1b5ea0..a7ca45b251cb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4fcc6a84a087..235733e91c79 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,15 +34,13 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
+#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
@@ -56,6 +55,7 @@
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
+#include "bnxt_xdp.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -99,6 +99,8 @@ enum board_idx {
BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
@@ -133,6 +135,8 @@ static const struct {
{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
@@ -168,6 +172,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
@@ -213,16 +219,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
-{
- /* Tell compiler to fetch tx indices from memory. */
- barrier();
-
- return bp->tx_ring_size -
- ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
-}
-
-static const u16 bnxt_lhint_arr[] = {
+const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
TX_BD_FLAGS_LHINT_1024_TO_2047,
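Dropping static from bnxt_lhint_arr[] and removing the local bnxt_tx_avail()
(presumably relocated to a shared header) lets the new bnxt_xdp.c transmit path
reuse them. The free-slot computation assumes a power-of-two ring; a hedged
standalone illustration:

	/* Sketch: free slots in a power-of-2 ring of 8 entries (mask 7).
	 * prod = 10, cons = 5 -> (10 - 5) & 7 = 5 in flight, 3 free.
	 * The mask also absorbs wrap-around once the u16 counters overflow.
	 */
	static u16 ring_avail(u16 size, u16 mask, u16 prod, u16 cons)
	{
		return size - ((prod - cons) & mask);
	}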
@@ -265,8 +262,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- txr = &bp->tx_ring[i];
txq = netdev_get_tx_queue(dev, i);
+ txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
free_size = bnxt_tx_avail(bp, txr);
@@ -512,8 +509,7 @@ tx_dma_error:
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- int index = txr - &bp->tx_ring[0];
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
int i;
@@ -576,6 +572,25 @@ next_tx_int:
}
}
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ struct device *dev = &bp->pdev->dev;
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+ if (dma_mapping_error(dev, *mapping)) {
+ __free_page(page);
+ return NULL;
+ }
+ *mapping += bp->rx_dma_offset;
+ return page;
+}
+
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
@@ -586,8 +601,8 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
if (!data)
return NULL;
- *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
- bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+ *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
+ bp->rx_buf_use_size, bp->rx_dir);
if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data);
@@ -596,29 +611,37 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
return data;
}
-static inline int bnxt_alloc_rx_data(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
- u8 *data;
dma_addr_t mapping;
- data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
- if (!data)
- return -ENOMEM;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
- rx_buf->data = data;
- dma_unmap_addr_set(rx_buf, mapping, mapping);
+ if (!page)
+ return -ENOMEM;
- rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rx_buf->data = page;
+ rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+ } else {
+ u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bp->rx_offset;
+ }
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
return 0;
}
-static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
- u8 *data)
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -628,9 +651,9 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
+ prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
- dma_unmap_addr_set(prod_rx_buf, mapping,
- dma_unmap_addr(cons_rx_buf, mapping));
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
@@ -756,13 +779,60 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int payload = offset_and_len >> 16;
+ unsigned int len = offset_and_len & 0xffff;
+ struct skb_frag_struct *frag;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int off, err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+
+ if (unlikely(!payload))
+ payload = eth_get_headlen(data_ptr, len);
+
+ skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+
+ off = (void *)data_ptr - page_address(page);
+ skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+ memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+ payload + NET_IP_ALIGN);
+
+ frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_size_sub(frag, payload);
+ frag->page_offset += payload;
+ skb->data_len -= payload;
+ skb->tail += payload;
+
+ return skb;
+}
+
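bnxt_rx_page_skb() and bnxt_rx_skb() now share one rx_skb_func signature, with
the payload offset and frame length packed into a single offset_and_len word:
offset in the top 16 bits, length in the bottom 16, matching the "payload | len"
caller later in bnxt_rx_pkt(). A hedged round-trip sketch:

	/* Sketch: packing as done by the caller, unpacking as done here.
	 * RX_CMP_PAYLOAD_OFFSET already masks the offset into the upper
	 * half-word, so no explicit shift is needed on the pack side.
	 */
	u32 packed = payload_off | frame_len;	/* payload_off pre-shifted */
	unsigned int payload = packed >> 16;
	unsigned int len = packed & 0xffff;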
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr, u16 cons,
- u16 prod, u8 *data, dma_addr_t dma_addr,
- unsigned int len)
+ void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
{
- int err;
+ u16 prod = rxr->rx_prod;
struct sk_buff *skb;
+ int err;
err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
@@ -772,14 +842,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ bp->rx_dir);
if (!skb) {
kfree(data);
return NULL;
}
- skb_reserve(skb, BNXT_RX_OFFSET);
- skb_put(skb, len);
+ skb_reserve(skb, bp->rx_offset);
+ skb_put(skb, offset_and_len & 0xffff);
return skb;
}
@@ -815,7 +885,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ mapping = cons_rx_buf->mapping;
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
@@ -878,14 +948,14 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping,
- bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ bp->rx_dir);
- memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+ memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+ len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping,
- bp->rx_copy_thresh,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ bp->rx_dir);
skb_put(skb, len);
return skb;
@@ -954,17 +1024,19 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
}
prod_rx_buf->data = tpa_info->data;
+ prod_rx_buf->data_ptr = tpa_info->data_ptr;
mapping = tpa_info->mapping;
- dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ prod_rx_buf->mapping = mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
tpa_info->data = cons_rx_buf->data;
+ tpa_info->data_ptr = cons_rx_buf->data_ptr;
cons_rx_buf->data = NULL;
- tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ tpa_info->mapping = cons_rx_buf->mapping;
tpa_info->len =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
@@ -1130,7 +1202,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
dev_kfree_skb_any(skb);
return NULL;
}
- tcp_gro_complete(skb);
if (nw_off) { /* tunnel */
struct udphdr *uh = NULL;
@@ -1180,6 +1251,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+ if (likely(skb))
+ tcp_gro_complete(skb);
#endif
return skb;
}
@@ -1189,17 +1262,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u32 *raw_cons,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
- bool *agg_event)
+ u8 *event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u8 agg_id = TPA_END_AGG_ID(tpa_end);
- u8 *data, agg_bufs;
+ u8 *data_ptr, agg_bufs;
u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
+ void *data;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
@@ -1211,7 +1285,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info = &rxr->rx_tpa[agg_id];
data = tpa_info->data;
- prefetch(data);
+ data_ptr = tpa_info->data_ptr;
+ prefetch(data_ptr);
len = tpa_info->len;
mapping = tpa_info->mapping;
@@ -1222,7 +1297,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
return ERR_PTR(-EBUSY);
- *agg_event = true;
+ *event |= BNXT_AGG_EVENT;
cp_cons = NEXT_CMP(cp_cons);
}
@@ -1234,7 +1309,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data, len, mapping);
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
@@ -1250,18 +1325,19 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
tpa_info->data = new_data;
+ tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ bp->rx_dir);
if (!skb) {
kfree(data);
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
}
- skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
@@ -1307,7 +1383,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
* -EIO - packet aborted due to hw error indicated in BD
*/
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
- bool *agg_event)
+ u8 *event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
@@ -1318,10 +1394,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
- u8 *data, agg_bufs, cmp_type;
+ u8 *data_ptr, agg_bufs, cmp_type;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ void *data;
int rc = 0;
+ u32 misc;
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1342,13 +1420,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
+ *event |= BNXT_RX_EVENT;
goto next_rx_no_prod;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
- (struct rx_tpa_end_cmp_ext *)rxcmp1,
- agg_event);
+ (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
if (unlikely(IS_ERR(skb)))
return -EBUSY;
@@ -1356,37 +1434,36 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = -ENOMEM;
if (likely(skb)) {
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_napi_id(skb, &bnapi->napi);
- if (bnxt_busy_polling(bnapi))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&bnapi->napi, skb);
+ napi_gro_receive(&bnapi->napi, skb);
rc = 1;
}
+ *event |= BNXT_RX_EVENT;
goto next_rx_no_prod;
}
cons = rxcmp->rx_cmp_opaque;
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
bnxt_sched_reset(bp, rxr);
return rc1;
}
- prefetch(data);
+ prefetch(data_ptr);
- agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
- RX_CMP_AGG_BUFS_SHIFT;
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+ agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
if (agg_bufs) {
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
return -EBUSY;
cp_cons = NEXT_CMP(cp_cons);
- *agg_event = true;
+ *event |= BNXT_AGG_EVENT;
}
+ *event |= BNXT_RX_EVENT;
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
@@ -1399,17 +1476,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
- dma_addr = dma_unmap_addr(rx_buf, mapping);
+ dma_addr = rx_buf->mapping;
+
+ if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
}
} else {
- skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+ u32 payload;
+
+ if (rx_buf->data_ptr == data_ptr)
+ payload = misc & RX_CMP_PAYLOAD_OFFSET;
+ else
+ payload = 0;
+ skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+ payload | len);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1460,11 +1549,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_napi_id(skb, &bnapi->napi);
- if (bnxt_busy_polling(bnapi))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&bnapi->napi, skb);
+ napi_gro_receive(&bnapi->napi, skb);
rc = 1;
next_rx:
@@ -1637,8 +1722,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
u32 cons;
int tx_pkts = 0;
int rx_pkts = 0;
- bool rx_event = false;
- bool agg_event = false;
+ u8 event = 0;
struct tx_cmp *txcmp;
while (1) {
@@ -1660,12 +1744,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (unlikely(tx_pkts > bp->tx_wake_thresh))
rx_pkts = budget;
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
if (likely(rc >= 0))
rx_pkts += rc;
else if (rc == -EBUSY) /* partial completion */
break;
- rx_event = true;
} else if (unlikely((TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE) ||
(TX_CMP_TYPE(txcmp) ==
@@ -1680,6 +1763,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
break;
}
+ if (event & BNXT_TX_EVENT) {
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ void __iomem *db = txr->tx_doorbell;
+ u16 prod = txr->tx_prod;
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ writel(DB_KEY_TX | prod, db);
+ writel(DB_KEY_TX | prod, db);
+ }
+
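Folding the old rx_event/agg_event bools (plus the new TX case) into one u8
lets a single byte record every doorbell the poll loop owes. The flag values
live in bnxt.h; a plausible definition (assumed, not quoted from this hunk):

	/* Sketch: one bit per doorbell condition accumulated during polling. */
	#define BNXT_RX_EVENT	1
	#define BNXT_AGG_EVENT	2
	#define BNXT_TX_EVENT	4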
cpr->cp_raw_cons = raw_cons;
/* ACK completion ring before freeing tx ring and producing new
* buffers in rx/agg rings to prevent overflowing the completion
@@ -1688,14 +1783,14 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
if (tx_pkts)
- bnxt_tx_int(bp, bnapi, tx_pkts);
+ bnapi->tx_int(bp, bnapi, tx_pkts);
- if (rx_event) {
+ if (event & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (agg_event) {
+ if (event & BNXT_AGG_EVENT) {
writel(DB_KEY_RX | rxr->rx_agg_prod,
rxr->rx_agg_doorbell);
writel(DB_KEY_RX | rxr->rx_agg_prod,
@@ -1716,7 +1811,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
u32 rx_pkts = 0;
- bool agg_event = false;
+ u8 event = 0;
while (1) {
int rc;
@@ -1740,7 +1835,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
if (likely(rc == -EIO))
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
@@ -1763,13 +1858,13 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (agg_event) {
+ if (event & BNXT_AGG_EVENT) {
writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
}
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_pkts);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
}
return rx_pkts;
@@ -1782,9 +1877,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
- if (!bnxt_lock_napi(bnapi))
- return budget;
-
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
@@ -1792,42 +1884,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
if (!bnxt_has_work(bp, cpr)) {
- napi_complete(napi);
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ if (napi_complete_done(napi, work_done))
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
}
}
mmiowb();
- bnxt_unlock_napi(bnapi);
return work_done;
}
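Unlike the old napi_complete(), napi_complete_done() takes the work count and
returns false when the core wants polling to continue (for example under the
new busy-poll integration), which is why the doorbell rearm above became
conditional. A hedged usage sketch; irq_rearm() is a hypothetical helper:

	/* Sketch: only rearm device interrupts when NAPI really completed. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		irq_rearm(dev);	/* hypothetical; bnxt uses BNXT_CP_DB_REARM */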
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int bnxt_busy_poll(struct napi_struct *napi)
-{
- struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int rx_work, budget = 4;
-
- if (atomic_read(&bp->intr_sem) != 0)
- return LL_FLUSH_FAILED;
-
- if (!bp->link_info.link_up)
- return LL_FLUSH_FAILED;
-
- if (!bnxt_lock_poll(bnapi))
- return LL_FLUSH_BUSY;
-
- rx_work = bnxt_poll_work(bp, bnapi, budget);
-
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
-
- bnxt_unlock_poll(bnapi);
- return rx_work;
-}
-#endif
-
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
@@ -1905,11 +1971,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!data)
continue;
- dma_unmap_single(
- &pdev->dev,
- dma_unmap_addr(tpa_info, mapping),
- bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir);
tpa_info->data = NULL;
@@ -1919,19 +1983,20 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
for (j = 0; j < max_idx; j++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
- u8 *data = rx_buf->data;
+ void *data = rx_buf->data;
if (!data)
continue;
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, rx_buf->mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
rx_buf->data = NULL;
- kfree(data);
+ if (BNXT_RX_PAGE_MODE(bp))
+ __free_page(data);
+ else
+ kfree(data);
}
for (j = 0; j < max_agg_idx; j++) {
@@ -1942,8 +2007,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!page)
continue;
- dma_unmap_page(&pdev->dev,
- dma_unmap_addr(rx_agg_buf, mapping),
+ dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
rx_agg_buf->page = NULL;
@@ -2034,6 +2098,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ if (rxr->xdp_prog)
+ bpf_prog_put(rxr->xdp_prog);
+
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2172,6 +2239,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
ring->queue_id = bp->q_info[j].queue_id;
+ if (i < bp->tx_nr_rings_xdp)
+ continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
j++;
}
@@ -2319,6 +2388,15 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
+ if (IS_ERR(rxr->xdp_prog)) {
+ int rc = PTR_ERR(rxr->xdp_prog);
+
+ rxr->xdp_prog = NULL;
+ return rc;
+ }
+ }
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
@@ -2365,6 +2443,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
return -ENOMEM;
rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
rxr->rx_tpa[i].mapping = mapping;
}
} else {
@@ -2380,6 +2459,14 @@ static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+ bp->rx_dma_offset = XDP_PACKET_HEADROOM;
+ } else {
+ bp->rx_offset = BNXT_RX_OFFSET;
+ bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+ }
+
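In page mode the frame is DMA'd XDP_PACKET_HEADROOM bytes into the page so an
XDP program has room to grow headers. A comment-only sketch of the layout,
assuming the mainline constants (XDP_PACKET_HEADROOM = 256, NET_IP_ALIGN = 0
or 2):

	/*
	 * rx_dma_offset = XDP_PACKET_HEADROOM                  (DMA target)
	 * rx_offset     = XDP_PACKET_HEADROOM + NET_IP_ALIGN   (data_ptr)
	 *
	 * The headroom in front of the frame lets bpf_xdp_adjust_head()
	 * extend headers in place instead of copying the packet.
	 */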
for (i = 0; i < bp->rx_nr_rings; i++) {
rc = bnxt_init_one_rx_ring(bp, i);
if (rc)
@@ -2503,9 +2590,11 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
return pages;
}
-static void bnxt_set_tpa_flags(struct bnxt *bp)
+void bnxt_set_tpa_flags(struct bnxt *bp)
{
bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ return;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
if (bp->dev->features & NETIF_F_GRO)
@@ -2535,7 +2624,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
- if (rx_space > PAGE_SIZE) {
+ if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
u32 jumbo_factor;
bp->flags |= BNXT_FLAG_JUMBO;
@@ -2587,6 +2676,27 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->cp_ring_mask = bp->cp_bit - 1;
}
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ if (page_mode) {
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
+ return -EOPNOTSUPP;
+ bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bp->rx_dir = DMA_BIDIRECTIONAL;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ } else {
+ bp->dev->max_mtu = BNXT_MAX_MTU;
+ bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+ bp->rx_dir = DMA_FROM_DEVICE;
+ bp->rx_skb_func = bnxt_rx_skb;
+ }
+ return 0;
+}
+
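bnxt_set_rx_skb_mode() is the single switch between the normal aggregated RX
path and the page-per-frame path XDP requires (no LRO/TPA, no aggregation
rings, bidirectional DMA). A hedged sketch of how the attach path in
bnxt_xdp.c presumably drives it; bnxt_xdp_set_sketch() is illustrative, not
code from this patch:

	static int bnxt_xdp_set_sketch(struct bnxt *bp, struct bpf_prog *prog)
	{
		int rc = bnxt_set_rx_skb_mode(bp, prog != NULL);

		if (rc)
			return rc;	/* e.g. MTU > BNXT_MAX_PAGE_MODE_MTU */
		bnxt_set_tpa_flags(bp);		/* TPA stays off in page mode */
		bnxt_set_ring_params(bp);
		return 0;
	}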
static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
int i;
@@ -2669,6 +2779,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
+ if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ !(vnic->flags & BNXT_VNIC_RSS_FLAG))
+ continue;
+
/* Allocate rss table and hash key */
vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&vnic->rss_table_dma_addr,
@@ -2892,6 +3006,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
+ kfree(bp->tx_ring_map);
+ bp->tx_ring_map = NULL;
kfree(bp->tx_ring);
bp->tx_ring = NULL;
kfree(bp->rx_ring);
@@ -2944,6 +3060,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (!bp->tx_ring)
return -ENOMEM;
+ bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+
+ if (!bp->tx_ring_map)
+ return -ENOMEM;
+
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
j = 0;
else
@@ -2952,6 +3074,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->tx_nr_rings; i++, j++) {
bp->tx_ring[i].bnapi = bp->bnapi[j];
bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+ bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
+ if (i >= bp->tx_nr_rings_xdp) {
+ bp->tx_ring[i].txq_index = i -
+ bp->tx_nr_rings_xdp;
+ bp->bnapi[j]->tx_int = bnxt_tx_int;
+ } else {
+ bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
+ bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+ }
}
rc = bnxt_alloc_stats(bp);
@@ -2993,6 +3124,47 @@ alloc_mem_err:
return rc;
}
+static void bnxt_disable_int(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID)
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+
+ bnxt_disable_int(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+ int i;
+
+ atomic_set(&bp->intr_sem, 0);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
@@ -3292,6 +3464,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
+#define BNXT_NTP_TUNNEL_FLTR_FLAG \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
+
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
@@ -3312,10 +3487,31 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req.ip_protocol = keys->basic.ip_proto;
- req.src_ipaddr[0] = keys->addrs.v4addrs.src;
- req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
+ int i;
+
+ req.ethertype = htons(ETH_P_IPV6);
+ req.ip_addr_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
+ *(struct in6_addr *)&req.src_ipaddr[0] =
+ keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req.dst_ipaddr[0] =
+ keys->addrs.v6addrs.dst;
+ for (i = 0; i < 4; i++) {
+ req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ }
+ } else {
+ req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
+ if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
+ req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+ req.tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
+ }
req.src_port = keys->ports.src;
req.src_port_mask = cpu_to_be16(0xffff);
@@ -3562,6 +3758,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
+ } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
+ req.rss_rule =
+ cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
} else {
req.rss_rule = cpu_to_le16(0xffff);
}
@@ -3665,6 +3867,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
return rc;
}
+static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+ struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_vnic_qcaps_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10600)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ if (resp->flags &
+ cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+ bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
u16 i;
@@ -3768,7 +3991,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
break;
case HWRM_RING_ALLOC_CMPL:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req.length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
@@ -3787,7 +4010,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (rc || err) {
switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_CMPL:
+ case RING_FREE_REQ_RING_TYPE_L2_CMPL:
netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
rc, err);
return -1;
@@ -3811,6 +4034,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
return rc;
}
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+ int rc;
+
+ if (BNXT_PF(bp)) {
+ struct hwrm_func_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = cpu_to_le16(idx);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ } else {
+ struct hwrm_func_vf_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ req.enables =
+ cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = cpu_to_le16(idx);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ }
+ return rc;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
@@ -3827,6 +4074,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
goto err_out;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+ }
}
for (i = 0; i < bp->tx_nr_rings; i++) {
@@ -3901,7 +4154,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
if (rc || error_code) {
switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_CMPL:
+ case RING_FREE_REQ_RING_TYPE_L2_CMPL:
netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
@@ -3977,6 +4230,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ /* The completion rings are about to be freed. After that the
+ * IRQ doorbell will not work anymore, so we need to disable
+ * the IRQ here.
+ */
+ bnxt_disable_int_sync(bp);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -3984,7 +4243,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_CMPL,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -3992,6 +4251,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+/* Caller must hold bp->hwrm_cmd_lock */
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(fid);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+
+ return rc;
+}
+
+static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ req.num_tx_rings = cpu_to_le16(*tx_rings);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
@@ -4249,7 +4552,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
/* overwrite netdev dev_adr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(bp->dev->dev_addr);
+ eth_hw_addr_random(bp->dev);
rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
}
return rc;
@@ -4463,8 +4766,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
+ if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
+ goto skip_rss_ctx;
+
/* allocate context for vnic */
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
if (rc) {
@@ -4484,6 +4791,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
bp->rsscos_nr_ctxs++;
}
+skip_rss_ctx:
/* configure default vnic, ring grp */
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
if (rc) {
@@ -4518,13 +4826,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
int i, rc = 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
if (vnic_id >= bp->nr_vnics)
break;
- bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+ vnic = &bp->vnic_info[vnic_id];
+ vnic->flags |= BNXT_VNIC_RFS_FLAG;
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
@@ -4698,40 +5010,13 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
return bnxt_init_chip(bp, irq_re_init);
}
-static void bnxt_disable_int(struct bnxt *bp)
-{
- int i;
-
- if (!bp->bnapi)
- return;
-
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- }
-}
-
-static void bnxt_enable_int(struct bnxt *bp)
-{
- int i;
-
- atomic_set(&bp->intr_sem, 0);
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
- }
-}
-
static int bnxt_set_real_num_queues(struct bnxt *bp)
{
int rc;
struct net_device *dev = bp->dev;
- rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+ rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+ bp->tx_nr_rings_xdp);
if (rc)
return rc;
@@ -4779,19 +5064,12 @@ static void bnxt_setup_msix(struct bnxt *bp)
tcs = netdev_get_num_tc(dev);
if (tcs > 1) {
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
- if (bp->tx_nr_rings_per_tc == 0) {
- netdev_reset_tc(dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- } else {
- int i, off, count;
+ int i, off, count;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
- for (i = 0; i < tcs; i++) {
- count = bp->tx_nr_rings_per_tc;
- off = i * count;
- netdev_set_tc_queue(dev, i, count, off);
- }
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
}
}
@@ -4836,6 +5114,26 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
+#ifdef CONFIG_RFS_ACCEL
+static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_rsscos_ctxs;
+#endif
+ return bp->pf.max_rsscos_ctxs;
+}
+
+static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_vnics;
+#endif
+ return bp->pf.max_vnics;
+}
+#endif
+
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
@@ -5094,10 +5392,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ for (i = 0; i < bp->cp_nr_rings; i++)
napi_disable(&bp->bnapi[i]->napi);
- bnxt_disable_poll(bp->bnapi[i]);
- }
}
static void bnxt_enable_napi(struct bnxt *bp)
@@ -5106,7 +5402,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
bp->bnapi[i]->in_reset = false;
- bnxt_enable_poll(bp->bnapi[i]);
napi_enable(&bp->bnapi[i]->napi);
}
}
@@ -5150,7 +5445,7 @@ static void bnxt_report_link(struct bnxt *bp)
if (bp->link_info.link_up) {
const char *duplex;
const char *flow_ctrl;
- u16 speed;
+ u16 speed, fec;
netif_carrier_on(bp->dev);
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5172,6 +5467,12 @@ static void bnxt_report_link(struct bnxt *bp)
netdev_info(bp->dev, "EEE is %s\n",
bp->eee.eee_active ? "active" :
"not active");
+ fec = bp->link_info.fec_cfg;
+ if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
+ netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+ (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
+ (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
+ (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
@@ -5296,6 +5597,11 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
}
}
+
+ link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
+ if (bp->hwrm_spec_code >= 0x10504)
+ link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
+
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -5384,7 +5690,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
{
u8 autoneg = bp->link_info.autoneg;
u16 fw_link_speed = bp->link_info.req_link_speed;
- u32 advertising = bp->link_info.advertising;
+ u16 advertising = bp->link_info.advertising;
if (autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |=
@@ -5489,6 +5795,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_qcaps_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+ if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+ int i;
+
+ bp->num_leds = resp->num_leds;
+ memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
+ bp->num_leds);
+ for (i = 0; i < bp->num_leds; i++) {
+ struct bnxt_led_info *led = &bp->leds[i];
+ __le16 caps = led->led_state_caps;
+
+ if (!led->led_group_id ||
+ !BNXT_LED_ALT_BLINK_CAP(caps)) {
+ bp->num_leds = 0;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -5527,6 +5872,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
rc);
return rc;
}
+ if (!BNXT_SINGLE_PF(bp))
+ return 0;
+
if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
link_info->req_flow_ctrl)
@@ -5678,19 +6026,6 @@ static int bnxt_open(struct net_device *dev)
return __bnxt_open_nic(bp, true, true);
}
-static void bnxt_disable_int_sync(struct bnxt *bp)
-{
- int i;
-
- atomic_inc(&bp->intr_sem);
- if (!netif_running(bp->dev))
- return;
-
- bnxt_disable_int(bp);
- for (i = 0; i < bp->cp_nr_rings; i++)
- synchronize_irq(bp->irq_tbl[i].vector);
-}
-
int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -5712,13 +6047,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
msleep(20);
- /* Flush rings before disabling interrupts */
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
@@ -5765,16 +6099,14 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-static struct rtnl_link_stats64 *
+static void
bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
u32 i;
struct bnxt *bp = netdev_priv(dev);
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
if (!bp->bnapi)
- return stats;
+ return;
/* TODO check if we need to synchronize with bnxt_close path */
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -5821,8 +6153,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
stats->tx_errors = le64_to_cpu(tx->tx_err);
}
-
- return stats;
}
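This follows the tree-wide change that made .ndo_get_stats64 return void: the
core zeroes the rtnl_link_stats64 buffer before the call (hence the dropped
memset above), and drivers only fill in the counters they have. The resulting
hook shape in netdevice.h, for reference:

	/* Sketch: post-conversion ndo_get_stats64 signature. */
	void	(*ndo_get_stats64)(struct net_device *dev,
				   struct rtnl_link_stats64 *storage);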
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
@@ -5975,20 +6305,36 @@ skip_uc:
return rc;
}
+/* If the chip and firmware support RFS */
+static bool bnxt_rfs_supported(struct bnxt *bp)
+{
+ if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return true;
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ return true;
+ return false;
+}
+
+/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
- struct bnxt_pf_info *pf = &bp->pf;
- int vnics;
+ int vnics, max_vnics, max_rss_ctxs;
- if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
return false;
vnics = 1 + bp->rx_nr_rings;
- if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
+ max_vnics = bnxt_get_max_func_vnics(bp);
+ max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
+
+ /* RSS contexts not a limiting factor */
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ max_rss_ctxs = max_vnics;
+ if (vnics > max_vnics || vnics > max_rss_ctxs) {
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
- min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
+ min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}
@@ -6044,6 +6390,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ flags &= ~BNXT_FLAG_TPA;
+
if (features & NETIF_F_HW_VLAN_CTAG_RX)
flags |= BNXT_FLAG_STRIP_VLAN;
@@ -6296,6 +6645,62 @@ static void bnxt_sp_task(struct work_struct *work)
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
+/* Under rtnl_lock */
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
+{
+ int max_rx, max_tx, tx_sets = 1;
+ int tx_rings_needed;
+ bool sh = true;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ sh = false;
+
+ if (tcs)
+ tx_sets = tcs;
+
+ rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
+ if (rc)
+ return rc;
+
+ if (max_rx < rx)
+ return -ENOMEM;
+
+ tx_rings_needed = tx * tx_sets + tx_xdp;
+ if (max_tx < tx_rings_needed)
+ return -ENOMEM;
+
+ if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
+ tx_rings_needed < (tx * tx_sets + tx_xdp))
+ return -ENOMEM;
+ return 0;
+}
+
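bnxt_reserve_rings() centralizes the TX ring budgeting that both the TC setup
below and the XDP attach path need: each traffic class multiplies the base TX
set, and XDP typically adds one transmit ring per RX ring. A worked example
under assumed values:

	/* Sketch: tcs = 2, tx = 4 rings per TC, tx_xdp = 4 XDP rings
	 * -> tx_rings_needed = 4 * 2 + 4 = 12; the call fails with
	 * -ENOMEM unless the firmware reservation comes back >= 12.
	 */
	tx_rings_needed = tx * tx_sets + tx_xdp;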
+static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
+{
+ if (bp->bar2) {
+ pci_iounmap(pdev, bp->bar2);
+ bp->bar2 = NULL;
+ }
+
+ if (bp->bar1) {
+ pci_iounmap(pdev, bp->bar1);
+ bp->bar1 = NULL;
+ }
+
+ if (bp->bar0) {
+ pci_iounmap(pdev, bp->bar0);
+ bp->bar0 = NULL;
+ }
+}
+
+static void bnxt_cleanup_pci(struct bnxt *bp)
+{
+ bnxt_unmap_bars(bp, bp->pdev);
+ pci_release_regions(bp->pdev);
+ pci_disable_device(bp->pdev);
+}
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -6383,25 +6788,10 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->current_interval = BNXT_TIMER_INTERVAL;
clear_bit(BNXT_STATE_OPEN, &bp->state);
-
return 0;
init_err_release:
- if (bp->bar2) {
- pci_iounmap(pdev, bp->bar2);
- bp->bar2 = NULL;
- }
-
- if (bp->bar1) {
- pci_iounmap(pdev, bp->bar1);
- bp->bar1 = NULL;
- }
-
- if (bp->bar0) {
- pci_iounmap(pdev, bp->bar0);
- bp->bar0 = NULL;
- }
-
+ bnxt_unmap_bars(bp, pdev);
pci_release_regions(pdev);
init_err_disable:
@@ -6458,9 +6848,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
+ int rc;
if (tc > bp->max_tc) {
- netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+ netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
tc, bp->max_tc);
return -EINVAL;
}
@@ -6471,13 +6862,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if (tc) {
- int max_rx_rings, max_tx_rings, rc;
-
- rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
- if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
- return -ENOMEM;
- }
+ rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+ tc, bp->tx_nr_rings_xdp);
+ if (rc)
+ return rc;
/* Needs to close the device and do hw resource re-allocations */
if (netif_running(bp->dev))
@@ -6521,6 +6909,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
keys1->ports.ports == keys2->ports.ports &&
keys1->basic.ip_proto == keys2->basic.ip_proto &&
keys1->basic.n_proto == keys2->basic.n_proto &&
+ keys1->control.flags == keys2->control.flags &&
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
return true;
@@ -6538,9 +6927,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
- if (skb->encapsulation)
- return -EPROTONOSUPPORT;
-
if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int off = 0, j;
@@ -6567,12 +6953,23 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
- if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+ if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
+ fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
((fkeys->basic.ip_proto != IPPROTO_TCP) &&
(fkeys->basic.ip_proto != IPPROTO_UDP))) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
+ bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
+ bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
@@ -6779,9 +7176,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
#endif
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = bnxt_busy_poll,
-#endif
+ .ndo_xdp = bnxt_xdp,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -6801,15 +7196,12 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
bnxt_dcb_free(bp);
- pci_iounmap(pdev, bp->bar2);
- pci_iounmap(pdev, bp->bar1);
- pci_iounmap(pdev, bp->bar0);
kfree(bp->edev);
bp->edev = NULL;
+ if (bp->xdp_prog)
+ bpf_prog_put(bp->xdp_prog);
+ bnxt_cleanup_pci(bp);
free_netdev(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
static int bnxt_probe_phy(struct bnxt *bp)
@@ -6920,8 +7312,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int rc;
rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
- if (rc)
- return rc;
+ if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
+ /* Not enough rings, try disabling agg rings. */
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+ if (rc)
+ return rc;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bnxt_set_ring_params(bp);
+ }
if (bp->flags & BNXT_FLAG_ROCE_CAP) {
int max_cp, max_stat, max_irq;
@@ -6960,6 +7361,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+
+ rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
@@ -7001,7 +7407,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bnxt *bp;
int rc, max_irqs;
- if (pdev->device == 0x16cd && pci_is_bridge(pdev))
+ if (pci_is_bridge(pdev))
return -ENODEV;
if (version_printed++ == 0)
@@ -7027,17 +7433,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &bnxt_netdev_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
-
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
mutex_init(&bp->hwrm_cmd_lock);
rc = bnxt_hwrm_ver_get(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
bnxt_hwrm_fw_set_time(bp);
@@ -7068,7 +7473,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 60 - 9500 */
dev->min_mtu = ETH_ZLEN;
- dev->max_mtu = 9500;
+ dev->max_mtu = BNXT_MAX_MTU;
bnxt_dcb_init(bp);
@@ -7081,11 +7486,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
bp->ulp_probe = bnxt_ulp_probe;
@@ -7095,7 +7500,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
rc);
rc = -1;
- goto init_err;
+ goto init_err_pci_clean;
}
rc = bnxt_hwrm_queue_qportcfg(bp);
@@ -7103,15 +7508,22 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
rc);
rc = -1;
- goto init_err;
+ goto init_err_pci_clean;
}
bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_set_max_func_irqs(bp, max_irqs);
- bnxt_set_dflt_rings(bp);
+ rc = bnxt_set_dflt_rings(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ goto init_err_pci_clean;
+ }
/* Default RSS hash cfg. */
bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
@@ -7126,7 +7538,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
- if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnxt_hwrm_vnic_qcaps(bp);
+ if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
@@ -7139,15 +7552,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = bnxt_probe_phy(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = bnxt_hwrm_func_reset(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = bnxt_init_int_mode(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = register_netdev(dev);
if (rc)
@@ -7164,10 +7577,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_err_clr_int:
bnxt_clear_int_mode(bp);
-init_err:
- pci_iounmap(pdev, bp->bar0);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+init_err_pci_clean:
+ bnxt_cleanup_pci(bp);
init_err_free:
free_netdev(dev);
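
Aside on the ntuple hunks above: the new checks gate IPv6 and encapsulated flows on bp->hwrm_spec_code >= 0x10601, i.e. HWRM spec 1.6.1. A minimal sketch of that version packing, assuming the usual (major << 16 | minor << 8 | update) encoding the driver derives in bnxt_hwrm_ver_get(); hwrm_spec() is a hypothetical stand-in, not driver code:

#include <stdio.h>

/* Packs an HWRM interface version the same way hwrm_spec_code is
 * assumed to be derived, so that 1.6.1 -> 0x10601.
 */
static unsigned int hwrm_spec(unsigned int maj, unsigned int min,
			      unsigned int upd)
{
	return (maj << 16) | (min << 8) | upd;
}

int main(void)
{
	printf("1.6.1 -> 0x%x\n", hwrm_spec(1, 6, 1));	/* prints 0x10601 */
	return 0;
}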
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 16defe9ececc..faf26a2f726b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,10 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.6.0"
+#define DRV_MODULE_VERSION "1.7.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 6
+#define DRV_VER_MIN 7
#define DRV_VER_UPD 0
struct tx_bd {
@@ -416,6 +417,11 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+#define BNXT_MAX_MTU 9500
+#define BNXT_MAX_PAGE_MODE_MTU \
+ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
+ XDP_PACKET_HEADROOM)
+
#define BNXT_MIN_PKT_SIZE 52
#define BNXT_NUM_TESTS(bp) 0
@@ -507,17 +513,25 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
+#define BNXT_RX_EVENT 1
+#define BNXT_AGG_EVENT 2
+#define BNXT_TX_EVENT 4
+
struct bnxt_sw_tx_bd {
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(mapping);
u8 is_gso;
u8 is_push;
- unsigned short nr_frags;
+ union {
+ unsigned short nr_frags;
+ u16 rx_prod;
+ };
};
struct bnxt_sw_rx_bd {
- u8 *data;
- DEFINE_DMA_UNMAP_ADDR(mapping);
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
};
struct bnxt_sw_rx_agg_bd {
@@ -558,6 +572,7 @@ struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
u16 tx_prod;
u16 tx_cons;
+ u16 txq_index;
void __iomem *tx_doorbell;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
@@ -576,7 +591,8 @@ struct bnxt_tx_ring_info {
};
struct bnxt_tpa_info {
- u8 *data;
+ void *data;
+ u8 *data_ptr;
dma_addr_t mapping;
u16 len;
unsigned short gso_type;
@@ -608,6 +624,8 @@ struct bnxt_rx_ring_info {
void __iomem *rx_doorbell;
void __iomem *rx_agg_doorbell;
+ struct bpf_prog *xdp_prog;
+
struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
struct bnxt_sw_rx_bd *rx_buf_ring;
@@ -654,20 +672,13 @@ struct bnxt_napi {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- atomic_t poll_state;
-#endif
- bool in_reset;
-};
+ void (*tx_int)(struct bnxt *, struct bnxt_napi *,
+ int);
+ u32 flags;
+#define BNXT_NAPI_FLAG_XDP 0x1
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum bnxt_poll_state_t {
- BNXT_STATE_IDLE = 0,
- BNXT_STATE_NAPI,
- BNXT_STATE_POLL,
- BNXT_STATE_DISABLE,
+ bool in_reset;
};
-#endif
struct bnxt_irq {
irq_handler_t handler;
@@ -720,6 +731,7 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_FLAG 2
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
+#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -840,7 +852,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
u16 support_speeds;
- u16 auto_link_speeds;
+ u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
@@ -855,6 +867,10 @@ struct bnxt_link_info {
u16 force_link_speed;
u32 preemphasis;
u8 module_status;
+ u16 fec_cfg;
+#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
+#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
+#define BNXT_FEC_ENC_RS PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
/* copy of requested setting from ethtool cmd */
u8 autoneg;
@@ -863,7 +879,7 @@ struct bnxt_link_info {
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
- u32 advertising;
+ u16 advertising; /* user adv setting */
bool force_link_chng;
/* a copy of phy_qcfg output used to report link
@@ -879,6 +895,20 @@ struct bnxt_queue_info {
u8 queue_profile;
};
+#define BNXT_MAX_LED 4
+
+struct bnxt_led_info {
+ u8 led_id;
+ u8 led_type;
+ u8 led_group_id;
+ u8 unused;
+ __le16 led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \
+ cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
+
+ __le16 led_color_caps;
+};
+
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
@@ -956,10 +986,13 @@ struct bnxt {
#define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
+ #define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
BNXT_FLAG_ROCEV2_CAP)
+ #define BNXT_FLAG_NO_AGG_RINGS 0x20000
+ #define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -971,6 +1004,7 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
struct bnxt_en_dev *edev;
struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
@@ -979,12 +1013,21 @@ struct bnxt {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
+ u16 *tx_ring_map;
struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int,
struct sk_buff *);
+ struct sk_buff * (*rx_skb_func)(struct bnxt *,
+ struct bnxt_rx_ring_info *,
+ u16, void *, u8 *, dma_addr_t,
+ unsigned int);
+
u32 rx_buf_size;
	u32 rx_buf_use_size; /* usable size */
+ u16 rx_offset;
+ u16 rx_dma_offset;
+ enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
u32 rx_copy_thresh;
@@ -1000,6 +1043,7 @@ struct bnxt {
int tx_nr_pages;
int tx_nr_rings;
int tx_nr_rings_per_tc;
+ int tx_nr_rings_xdp;
int tx_wake_thresh;
int tx_push_thresh;
@@ -1132,6 +1176,11 @@ struct bnxt {
struct ethtool_eee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+
+ u8 num_leds;
+ struct bnxt_led_info leds[BNXT_MAX_LED];
+
+ struct bpf_prog *xdp_prog;
};
#define BNXT_RX_STATS_OFFSET(counter) \
@@ -1141,93 +1190,6 @@ struct bnxt {
((offsetof(struct tx_port_stats, counter) + \
sizeof(struct rx_port_stats) + 512) / 8)
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
-{
- atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-/* called from the NAPI poll routine to get ownership of a bnapi */
-static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
-{
- int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
- BNXT_STATE_NAPI);
-
- return rc == BNXT_STATE_IDLE;
-}
-
-static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
-{
- atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-/* called from the busy poll routine to get ownership of a bnapi */
-static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
-{
- int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
- BNXT_STATE_POLL);
-
- return rc == BNXT_STATE_IDLE;
-}
-
-static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
-{
- atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
-{
- return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
-}
-
-static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
-{
- int old;
-
- while (1) {
- old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
- BNXT_STATE_DISABLE);
- if (old == BNXT_STATE_IDLE)
- break;
- usleep_range(500, 5000);
- }
-}
-
-#else
-
-static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
-{
- return true;
-}
-
-static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
-{
- return false;
-}
-
-static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
-{
- return false;
-}
-
-static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
-{
-}
-
-#endif
-
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
@@ -1238,7 +1200,23 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+
+ return bp->tx_ring_size -
+ ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
+extern const u16 bnxt_lhint_arr[];
+
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp);
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
@@ -1246,6 +1224,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
int bmap_size);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_hwrm_set_coal(struct bnxt *);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
@@ -1259,6 +1238,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
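
Aside on bnxt_tx_avail() above: it depends on the ring size being a power of two, so the masked difference of the 16-bit producer/consumer indices stays correct across wrap-around. A standalone sketch of that arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned short prod = 5, cons = 65533;	/* producer has wrapped past 0 */
	unsigned int ring_size = 1024;
	unsigned int mask = ring_size - 1;	/* tx_ring_mask */
	unsigned int used = (unsigned short)(prod - cons) & mask;

	/* 8 descriptors in flight, 1016 available */
	printf("used %u, avail %u\n", used, ring_size - used);
	return 0;
}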
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 784aa77610bc..6903a873f072 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -357,7 +357,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_rx_rings, max_tx_rings, tcs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
- channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
+ channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
@@ -387,9 +387,10 @@ static int bnxt_set_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct bnxt *bp = netdev_priv(dev);
- int max_rx_rings, max_tx_rings, tcs;
- u32 rc = 0;
+ int req_tx_rings, req_rx_rings, tcs;
bool sh = false;
+ int tx_xdp = 0;
+ int rc = 0;
if (channel->other_count)
return -EINVAL;
@@ -409,19 +410,22 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
-
tcs = netdev_get_num_tc(dev);
- if (tcs > 1)
- max_tx_rings /= tcs;
-
- if (sh &&
- channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
- return -ENOMEM;
- if (!sh && (channel->rx_count > max_rx_rings ||
- channel->tx_count > max_tx_rings))
- return -ENOMEM;
+ req_tx_rings = sh ? channel->combined_count : channel->tx_count;
+ req_rx_rings = sh ? channel->combined_count : channel->rx_count;
+ if (bp->tx_nr_rings_xdp) {
+ if (!sh) {
+ netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
+ return -EINVAL;
+ }
+ tx_xdp = req_rx_rings;
+ }
+ rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
@@ -439,19 +443,17 @@ static int bnxt_set_channels(struct net_device *dev,
if (sh) {
bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->rx_nr_rings = min_t(int, channel->combined_count,
- max_rx_rings);
- bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
- max_tx_rings);
+ bp->rx_nr_rings = channel->combined_count;
+ bp->tx_nr_rings_per_tc = channel->combined_count;
} else {
bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->rx_count;
bp->tx_nr_rings_per_tc = channel->tx_count;
}
-
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings_xdp = tx_xdp;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
if (tcs > 1)
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
@@ -524,24 +526,49 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr_found:
fkeys = &fltr->fkeys;
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
- fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
- fs->flow_type = UDP_V4_FLOW;
- else
- goto fltr_err;
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V4_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V4_FLOW;
+ else
+ goto fltr_err;
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ } else {
+ int i;
- fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V6_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V6_FLOW;
+ else
+ goto fltr_err;
+
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ for (i = 0; i < 4; i++) {
+ fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
+ fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+ }
+ fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
- fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+ }
fs->ring_cookie = fltr->rxq;
rc = 0;
@@ -893,7 +920,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
- u16 fw_speeds = link_info->auto_link_speeds;
+ u16 fw_speeds = link_info->advertising;
u8 fw_pause = 0;
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -1090,8 +1117,9 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
- u32 speed, fw_advertising = 0;
bool set_pause = false;
+ u16 fw_advertising = 0;
+ u32 speed;
int rc = 0;
if (!BNXT_SINGLE_PF(bp))
@@ -1550,17 +1578,37 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
install.install_type = cpu_to_le32(install_type);
- rc = hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (rc)
- return -EOPNOTSUPP;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc) {
+ rc = -EOPNOTSUPP;
+ goto flash_pkg_exit;
+ }
+
+ if (resp->error_code) {
+ u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ install.flags |= cpu_to_le16(
+ NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc) {
+ rc = -EOPNOTSUPP;
+ goto flash_pkg_exit;
+ }
+ }
+ }
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp->result, (int)resp->problem_item);
- return -ENOPKG;
+ rc = -ENOPKG;
}
- return 0;
+flash_pkg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
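
Aside on the reworked flash path above: the install is retried exactly once, with the allowed-to-defrag flag set, when the firmware reports a fragmentation error, and both attempts run under hwrm_cmd_lock. A schematic sketch of the retry pattern; send_install() and its constants are simplified stand-ins, not the HWRM API:

#include <stdio.h>

enum { ERR_NONE = 0, ERR_FRAG = 1 };
#define FLAG_ALLOW_DEFRAG 0x1

/* Stand-in for the HWRM install call: fails with a fragmentation
 * error unless the caller allows defragmentation.
 */
static int send_install(unsigned int flags)
{
	return (flags & FLAG_ALLOW_DEFRAG) ? ERR_NONE : ERR_FRAG;
}

int main(void)
{
	int err = send_install(0);			/* first attempt */

	if (err == ERR_FRAG)				/* retry once, defrag allowed */
		err = send_install(FLAG_ALLOW_DEFRAG);
	printf("install %s\n", err ? "failed" : "ok");
	return 0;
}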
static int bnxt_flash_device(struct net_device *dev,
@@ -2039,6 +2087,47 @@ static int bnxt_nway_reset(struct net_device *dev)
return rc;
}
+static int bnxt_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct hwrm_port_led_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_led_cfg *led_cfg;
+ u8 led_state;
+ __le16 duration;
+ int i, rc;
+
+ if (!bp->num_leds || BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ if (state == ETHTOOL_ID_ACTIVE) {
+ led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
+ duration = cpu_to_le16(500);
+ } else if (state == ETHTOOL_ID_INACTIVE) {
+ led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
+ duration = cpu_to_le16(0);
+ } else {
+ return -EINVAL;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ req.num_leds = bp->num_leds;
+ led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+ for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+ req.enables |= BNXT_LED_DFLT_ENABLES(i);
+ led_cfg->led_id = bp->leds[i].led_id;
+ led_cfg->led_state = led_state;
+ led_cfg->led_blink_on = duration;
+ led_cfg->led_blink_off = duration;
+ led_cfg->led_group_id = bp->leds[i].led_group_id;
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
@@ -2070,5 +2159,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
- .nway_reset = bnxt_nway_reset
+ .nway_reset = bnxt_nway_reset,
+ .set_phys_id = bnxt_set_phys_id,
};
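
Aside on the XDP-aware bnxt_set_channels() above: in combined mode one extra TX ring is reserved per RX ring for XDP, and the per-TC TX rings are scaled by the traffic-class count before the XDP rings are added back. A sketch of that bookkeeping; the struct and helper are illustrative, not driver code:

#include <stdio.h>

struct rings { int rx, tx_total, cp; };

/* Mirrors the combined-mode arithmetic above:
 * tx_nr_rings = tx_nr_rings_per_tc * tcs + tx_xdp (tcs treated as 1
 * when <= 1), with tx_xdp equal to the RX ring count under XDP.
 */
static struct rings combined_with_xdp(int combined, int tcs)
{
	struct rings r;
	int tx_xdp = combined;		/* one XDP TX ring per RX ring */

	r.rx = combined;
	r.tx_total = combined * (tcs > 1 ? tcs : 1) + tx_xdp;
	r.cp = r.tx_total > r.rx ? r.tx_total : r.rx;	/* shared rings */
	return r;
}

int main(void)
{
	struct rings r = combined_with_xdp(4, 2);

	printf("rx %d tx %d cp %d\n", r.rx, r.tx_total, r.cp);	/* 4 12 12 */
	return 0;
}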
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3abc03b60dbc..ed1e555292e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -10,6 +10,29 @@
#ifndef BNXT_ETHTOOL_H
#define BNXT_ETHTOOL_H
+struct bnxt_led_cfg {
+ u8 led_id;
+ u8 led_state;
+ u8 led_color;
+ u8 unused;
+ __le16 led_blink_on;
+ __le16 led_blink_off;
+ u8 led_group_id;
+ u8 rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA \
+ (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT 6
+
+#define BNXT_LED_DFLT_ENABLES(x) \
+ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
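
Aside on BNXT_LED_DFLT_ENABLES(x): the expansion works because each LED's enable bits occupy a contiguous 6-bit group in hwrm_port_led_cfg_input (LED1_ID is LED0_ID << 6, and so on), and the default mask 0x3b selects ID, STATE, BLINK_ON, BLINK_OFF and GROUP_ID but deliberately not COLOR. A host-side sketch of the expansion:

#include <stdio.h>

#define LED_DFLT_ENA		0x3bUL	/* ID|STATE|BLINK_ON|BLINK_OFF|GROUP_ID */
#define LED_DFLT_ENA_SHIFT	6

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)		/* BNXT_MAX_LED */
		printf("LED%d enables 0x%06lx\n", i,
		       LED_DFLT_ENA << (LED_DFLT_ENA_SHIFT * i));
	/* prints 0x00003b, 0x000ec0, 0x03b000, 0xec0000 */
	return 0;
}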
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 2ddfa51519a1..6e275c23d68b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,12 +11,12 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.6.0 */
+/* HSI and HWRM Specification 1.7.0 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 6
+#define HWRM_VERSION_MINOR 7
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_STR "1.6.0"
+#define HWRM_VERSION_STR "1.7.0"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -549,6 +549,8 @@ struct hwrm_ver_get_output {
__le32 dev_caps_cfg;
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
#define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
u8 roce_fw_maj;
u8 roce_fw_min;
u8 roce_fw_bld;
@@ -832,20 +834,32 @@ struct hwrm_func_qcfg_output {
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
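
Aside on the reworked min_bw/max_bw encoding here: each word packs a 28-bit bandwidth value in bits 0..27, a scale flag in bit 28 (bits vs. bytes) and a 3-bit unit selector in bits 29..31, which is where the MEGA/KILO/BASE/GIGA and percent codes live. A decoding sketch, assuming that layout:

#include <stdio.h>

/* Field layout taken from the *_BW_VALUE_MASK / *_BW_SCALE /
 * *_BW_VALUE_UNIT_* definitions above.
 */
static void decode_bw(unsigned int bw)
{
	unsigned int value = bw & 0xfffffff;		/* bits 0..27 */
	unsigned int scale_bytes = (bw >> 28) & 0x1;	/* SCALE_BYTES */
	unsigned int unit = (bw >> 29) & 0x7;		/* UNIT_* selector */

	printf("value %u, scale %s, unit code %u\n",
	       value, scale_bytes ? "bytes" : "bits", unit);
}

int main(void)
{
	decode_bw((0x6U << 29) | (0x1U << 28) | 1000);	/* GIGA, bytes, 1000 */
	return 0;
}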
@@ -921,20 +935,32 @@ struct hwrm_func_cfg_input {
__le32 min_bw;
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -1529,6 +1555,20 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -1919,6 +1959,219 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
+/* hwrm_port_led_cfg */
+/* Input (64 bytes) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused_0[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ u8 led0_group_id;
+ u8 unused_1;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ u8 led1_group_id;
+ u8 unused_2;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ u8 led2_group_id;
+ u8 unused_3;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ u8 led3_group_id;
+ u8 unused_4;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_5;
+ u8 unused_6;
+ u8 unused_7;
+ u8 valid;
+};
+
/* hwrm_queue_qportcfg */
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
@@ -2216,20 +2469,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id0_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2244,20 +2509,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id1_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2272,20 +2549,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id2_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2300,20 +2589,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id3_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2328,20 +2629,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id4_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2356,20 +2669,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id5_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2384,20 +2709,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id6_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2412,20 +2749,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id7_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2467,20 +2816,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id0_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2495,20 +2856,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id1_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2523,20 +2896,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id2_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2551,20 +2936,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id3_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2579,20 +2976,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id4_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2607,20 +3016,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id5_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2635,20 +3056,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id6_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2663,20 +3096,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id7_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2797,6 +3242,41 @@ struct hwrm_vnic_cfg_output {
u8 valid;
};
+/* hwrm_vnic_qcaps */
+/* Input (24 bytes) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ __le32 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
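[Editor's note: the new hwrm_vnic_qcaps message lets the driver discover per-VNIC capabilities before configuring them. A hedged sketch of how a caller might issue it, reusing the HWRM helpers already visible elsewhere in this diff (the wrapper name is hypothetical, and HWRM_VNIC_QCAPS is assumed to be defined with the other request types):

	/* Sketch only: query VNIC capabilities and test a flag. */
	static int bnxt_hwrm_vnic_qcaps_sketch(struct bnxt *bp)
	{
		struct hwrm_vnic_qcaps_input req = {0};
		struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
		int rc;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (!rc && (le32_to_cpu(resp->flags) &
			    VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			;	/* e.g. note that a default RSS completion
				 * ring is available on this device */
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
]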
/* hwrm_vnic_tpa_cfg */
/* Input (40 bytes) */
struct hwrm_vnic_tpa_cfg_input {
@@ -2992,9 +3472,10 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
#define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
u8 unused_0;
__le16 unused_1;
__le64 page_tbl_addr;
@@ -3028,10 +3509,16 @@ struct hwrm_ring_alloc_input {
__le32 max_bw;
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -3066,9 +3553,10 @@ struct hwrm_ring_free_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_FREE_REQ_RING_TYPE_TX 0x1UL
#define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3166,9 +3654,10 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_RESET_REQ_RING_TYPE_TX 0x1UL
#define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3597,6 +4086,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le32 flags;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -3697,7 +4187,7 @@ struct hwrm_cfa_ntuple_filter_free_output {
};
/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (40 bytes) */
+/* Input (48 bytes) */
struct hwrm_cfa_ntuple_filter_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3707,10 +4197,14 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
__le32 enables;
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
__le32 unused_0;
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ __le16 unused_1[3];
};
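[Editor's note: a quick size check on the "(40 bytes)" -> "(48 bytes)" comment change above: the new __le16 new_meter_instance_id (2 bytes) plus the __le16 unused_1[3] pad (6 bytes) add exactly 8 bytes, so the struct layout and the updated size comment agree.]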
/* Output (16 bytes) */
@@ -4058,9 +4552,7 @@ struct hwrm_fw_set_structured_data_input {
__le64 src_data_addr;
__le16 data_len;
u8 hdr_cnt;
- u8 unused_0;
- __le16 port_id;
- __le16 unused_1;
+ u8 unused_0[5];
};
/* Output (16 bytes) */
@@ -4077,7 +4569,7 @@ struct hwrm_fw_set_structured_data_output {
};
/* hwrm_fw_get_structured_data */
-/* Input (40 bytes) */
+/* Input (32 bytes) */
struct hwrm_fw_get_structured_data_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -4095,10 +4587,9 @@ struct hwrm_fw_get_structured_data_input {
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
u8 count;
u8 unused_0;
- __le16 port_id;
- __le16 unused_1[3];
};
/* Output (16 bytes) */
@@ -4582,7 +5073,11 @@ struct hwrm_nvm_install_update_input {
__le32 install_type;
#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- __le32 unused_0;
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ __le16 unused_0;
};
/* Output (24 bytes) */
@@ -4608,6 +5103,15 @@ struct hwrm_nvm_install_update_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ u8 unused_0[7];
+};
+
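[Editor's note: with this structure, a failed HWRM_NVM_INSTALL_UPDATE can now be qualified beyond a generic error. A hedged sketch of decoding it — the function is illustrative and assumes the response buffer carries the command-specific error per the usual HWRM convention:

	/* Sketch: map the NVM install error code to a message. */
	static const char *
	bnxt_nvm_err_str(struct hwrm_nvm_install_update_cmd_err *err)
	{
		switch (err->code) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			return "NVM is fragmented";
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE:
			return "no space left in NVM";
		default:
			return "unknown NVM install error";
		}
	}
]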
/* Hardware Resource Manager Specification */
/* Input (16 bytes) */
struct input {
@@ -4735,11 +5239,26 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_FREE (0xf1UL)
#define HWRM_WOL_FILTER_QCFG (0xf2UL)
#define HWRM_WOL_REASON_QCFG (0xf3UL)
+ #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL)
+ #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL)
+ #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
+ #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
+ #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
+ #define HWRM_CFA_VF_PAIR_FREE (0x101UL)
+ #define HWRM_CFA_VF_PAIR_INFO (0x102UL)
+ #define HWRM_CFA_FLOW_ALLOC (0x103UL)
+ #define HWRM_CFA_FLOW_FREE (0x104UL)
+ #define HWRM_CFA_FLOW_FLUSH (0x105UL)
+ #define HWRM_CFA_FLOW_STATS (0x106UL)
+ #define HWRM_CFA_FLOW_INFO (0x107UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
+ #define HWRM_NVM_FLUSH (0xfff0UL)
#define HWRM_NVM_GET_VARIABLE (0xfff1UL)
#define HWRM_NVM_SET_VARIABLE (0xfff2UL)
#define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
@@ -4939,12 +5458,13 @@ struct ctx_hw_stats {
struct hwrm_struct_hdr {
__le16 struct_id;
#define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
__le16 len;
u8 version;
u8 count;
@@ -4954,14 +5474,14 @@ struct hwrm_struct_hdr {
__le16 unused_0[3];
};
-/* DCBX Application configuration structure (8 bytes) */
-struct hwrm_struct_data_dcbx_app_cfg {
- __le16 protocol_id;
+/* DCBX Application configuration structure (1057) (8 bytes) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
u8 priority;
u8 valid;
u8 unused_0[3];
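[Editor's note: the protocol_id type change from __le16 to __be16 above reflects that DCBX/LLDP application protocol IDs travel in network byte order, so any comparison must use a big-endian constant. A small fragment, with ETH_P_FCOE (from <linux/if_ether.h>) chosen purely as an illustration:

	/* Match an FCoE application entry under the new __be16 type. */
	static bool app_is_fcoe(const struct hwrm_struct_data_dcbx_app *app)
	{
		return app->protocol_id == cpu_to_be16(ETH_P_FCOE) &&
		       app->protocol_selector ==
				STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE;
	}
]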
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c69602508666..0b8cd7443843 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
@@ -416,6 +417,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
u16 vf_ring_grps;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ int total_vf_tx_rings = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
@@ -429,6 +431,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+ vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
FUNC_CFG_REQ_ENABLES_MRU |
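[Editor's note: the min_t() clamp added above matters because a VF VNIC is only useful with an rx ring behind it. As a worked example: with pf->max_vnics = 128, bp->nr_vnics = 8, and 30 VFs, (128 - 8) / 30 = 4 VNICs per VF; but if each VF were only given 2 rx rings, the clamp would cut vf_vnics down to 2.]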
@@ -451,7 +455,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
req.num_rx_rings = cpu_to_le16(vf_rx_rings);
req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req.num_l2_ctxs = cpu_to_le16(4);
- vf_vnics = 1;
req.num_vnics = cpu_to_le16(vf_vnics);
/* FIXME spec currently uses 1 bit for stats ctx */
@@ -459,6 +462,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
+ int vf_tx_rsvd = vf_tx_rings;
+
req.fid = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
@@ -466,10 +471,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
break;
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = le16_to_cpu(req.fid);
+ rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
+ &vf_tx_rsvd);
+ if (rc)
+ break;
+ total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (!rc) {
- pf->max_tx_rings -= vf_tx_rings * num_vfs;
+ pf->max_tx_rings -= total_vf_tx_rings;
pf->max_rx_rings -= vf_rx_rings * num_vfs;
pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
pf->max_cp_rings -= vf_cp_rings * num_vfs;
@@ -506,6 +516,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rx_rings)
rx_ok = 1;
}
+ if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+ rx_ok = 0;
if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
tx_ok = 1;
@@ -544,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out2;
+ bnxt_ulp_sriov_cfg(bp, *num_vfs);
+
rc = pci_enable_sriov(bp->pdev, *num_vfs);
if (rc)
goto err_out2;
@@ -585,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp)
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
rtnl_unlock();
+
+ bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
new file mode 100644
index 000000000000..899c30fb5188
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -0,0 +1,240 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_xdp.h"
+
+static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod)
+{
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct tx_bd_ext *txbd1;
+ struct tx_bd *txbd;
+ u32 flags;
+ u16 prod;
+
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->rx_prod = rx_prod;
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+ TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
+ txbd1->tx_bd_mss = cpu_to_le32(0);
+ txbd1->tx_bd_cfa_action = cpu_to_le32(0);
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+}
+
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_sw_tx_bd *tx_buf;
+ u16 tx_cons = txr->tx_cons;
+ u16 last_tx_cons = tx_cons;
+ u16 rx_prod;
+ int i;
+
+ for (i = 0; i < nr_pkts; i++) {
+ last_tx_cons = tx_cons;
+ tx_cons = NEXT_TX(tx_cons);
+ tx_cons = NEXT_TX(tx_cons);
+ }
+ txr->tx_cons = tx_cons;
+ if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
+ rx_prod = rxr->rx_prod;
+ } else {
+ tx_buf = &txr->tx_buf_ring[last_tx_cons];
+ rx_prod = tx_buf->rx_prod;
+ }
+ writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
+}
+
+/* returns the following:
+ * true - packet consumed by XDP and new buffer is allocated.
+ * false - packet should be passed to the stack.
+ */
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+ struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct pci_dev *pdev;
+ struct xdp_buff xdp;
+ dma_addr_t mapping;
+ void *orig_data;
+ u32 tx_avail;
+ u32 offset;
+ u32 act;
+
+ if (!xdp_prog)
+ return false;
+
+ pdev = bp->pdev;
+ txr = rxr->bnapi->tx_ring;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ xdp.data_hard_start = *data_ptr - offset;
+ xdp.data = *data_ptr;
+ xdp.data_end = *data_ptr + *len;
+ orig_data = xdp.data;
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+ rcu_read_lock();
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ rcu_read_unlock();
+
+ tx_avail = bnxt_tx_avail(bp, txr);
+ /* If the tx ring is not full, we must not update the rx producer yet
+ * because we may still be transmitting on some BDs.
+ */
+ if (tx_avail != bp->tx_ring_size)
+ *event &= ~BNXT_RX_EVENT;
+
+ if (orig_data != xdp.data) {
+ offset = xdp.data - xdp.data_hard_start;
+ *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp.data_end - xdp.data;
+ }
+ switch (act) {
+ case XDP_PASS:
+ return false;
+
+ case XDP_TX:
+ if (tx_avail < 2) {
+ trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_reuse_rx_data(rxr, cons, page);
+ return true;
+ }
+
+ *event = BNXT_TX_EVENT;
+ dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
+ bp->rx_dir);
+ bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
+ NEXT_RX(rxr->rx_prod));
+ bnxt_reuse_rx_data(rxr, cons, page);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+		/* Fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(bp->dev, xdp_prog, act);
+		/* Fall through */
+ case XDP_DROP:
+ bnxt_reuse_rx_data(rxr, cons, page);
+ break;
+ }
+ return true;
+}
+
+/* Under rtnl_lock */
+static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
+{
+ struct net_device *dev = bp->dev;
+ int tx_xdp = 0, rc, tc;
+ struct bpf_prog *old;
+
+ if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
+ return -EOPNOTSUPP;
+ }
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
+ netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
+ return -EOPNOTSUPP;
+ }
+ if (prog)
+ tx_xdp = bp->rx_nr_rings;
+
+ tc = netdev_get_num_tc(dev);
+ if (!tc)
+ tc = 1;
+ rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+ tc, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
+ return rc;
+ }
+ if (netif_running(dev))
+ bnxt_close_nic(bp, true, false);
+
+ old = xchg(&bp->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ if (prog) {
+ bnxt_set_rx_skb_mode(bp, true);
+ } else {
+ int rx, tx;
+
+ bnxt_set_rx_skb_mode(bp, false);
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+ bp->tx_nr_rings_xdp = tx_xdp;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+ bnxt_set_tpa_flags(bp);
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, true, false);
+
+ return 0;
+}
+
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ rc = bnxt_xdp_set(bp, xdp->prog);
+ break;
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!bp->xdp_prog;
+ rc = 0;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
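[Editor's note: a note on the completion path in this new file: bnxt_xmit_xdp() consumes two BDs per packet (a long TX BD plus its extension), which is why bnxt_tx_int_xdp() advances tx_cons twice per completed packet, and why XDP_TX bails out when fewer than 2 descriptors remain. Once bnxt_xdp() is wired into the driver's net_device_ops (the bnxt.c portion of this series is expected to add .ndo_xdp = bnxt_xdp), a program can be attached with standard tooling, for instance:

	ip link set dev eth0 xdp obj xdp_prog.o   # attach (names are placeholders)
	ip link set dev eth0 xdp off              # detach
]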
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
new file mode 100644
index 000000000000..b529f2c5355b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -0,0 +1,19 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_XDP_H
+#define BNXT_XDP_H
+
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+ struct page *page, u8 **data_ptr, unsigned int *len,
+ u8 *event);
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index b1d2ac818710..cec94bbb2ea5 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3665,7 +3665,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)
static inline u16 cnic_get_vlan(struct net_device *dev,
struct net_device **vlan_dev)
{
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(dev)) {
*vlan_dev = vlan_dev_real_dev(dev);
return vlan_dev_vlan_id(dev);
}
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 435a2e4739d1..55c8e25b43d9 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
#ifdef CONFIG_SBMAC_COALESCE
__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
@@ -2617,7 +2617,7 @@ out_out:
return err;
}
-static int __exit sbmac_remove(struct platform_device *pldev)
+static int sbmac_remove(struct platform_device *pldev)
{
struct net_device *dev = platform_get_drvdata(pldev);
struct sbmac_softc *sc = netdev_priv(dev);
@@ -2634,7 +2634,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
- .remove = __exit_p(sbmac_remove),
+ .remove = sbmac_remove,
.driver = {
.name = sbmac_string,
},
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ae42de4fdddf..30d1eb9ebec9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -20,6 +20,7 @@
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
+#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
@@ -14145,8 +14146,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.set_link_ksettings = tg3_set_link_ksettings,
};
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void tg3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14154,13 +14155,11 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
if (!tp->hw_stats) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
- return stats;
+ return;
}
tg3_get_nstats(tp, stats);
spin_unlock_bh(&tp->lock);
-
- return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)