Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c | 959
1 file changed, 685 insertions, 274 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4fcc6a84a087..235733e91c79 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -33,15 +34,13 @@
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/rtc.h>
+#include <linux/bpf.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <net/udp_tunnel.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#endif
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
 #include <linux/cache.h>
@@ -56,6 +55,7 @@
 #include "bnxt_sriov.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_dcb.h"
+#include "bnxt_xdp.h"
 
 #define BNXT_TX_TIMEOUT		(5 * HZ)
 
@@ -99,6 +99,8 @@ enum board_idx {
 	BCM57407_NPAR,
 	BCM57414_NPAR,
 	BCM57416_NPAR,
+	BCM57452,
+	BCM57454,
 	NETXTREME_E_VF,
 	NETXTREME_C_VF,
 };
@@ -133,6 +135,8 @@ static const struct {
 	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
 	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
 	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+	{ "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	{ "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
 	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
 	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
 };
@@ -168,6 +172,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
 #ifdef CONFIG_BNXT_SRIOV
 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
@@ -213,16 +219,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
 #define BNXT_CP_DB_IRQ_DIS(db)						\
 		writel(DB_CP_IRQ_DIS_FLAGS, db)
 
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
-{
-	/* Tell compiler to fetch tx indices from memory. */
-	barrier();
-
-	return bp->tx_ring_size -
-		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
-}
-
-static const u16 bnxt_lhint_arr[] = {
+const u16 bnxt_lhint_arr[] = {
 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
 	TX_BD_FLAGS_LHINT_512_TO_1023,
 	TX_BD_FLAGS_LHINT_1024_TO_2047,
@@ -265,8 +262,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	txr = &bp->tx_ring[i];
 	txq = netdev_get_tx_queue(dev, i);
+	txr = &bp->tx_ring[bp->tx_ring_map[i]];
 	prod = txr->tx_prod;
 
 	free_size = bnxt_tx_avail(bp, txr);
@@ -512,8 +509,7 @@ tx_dma_error:
 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 {
 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
-	int index = txr - &bp->tx_ring[0];
-	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
 	u16 cons = txr->tx_cons;
 	struct pci_dev *pdev = bp->pdev;
 	int i;
@@ -576,6 +572,25 @@ next_tx_int:
 	}
 }
 
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+					 gfp_t gfp)
+{
+	struct device *dev = &bp->pdev->dev;
+	struct page *page;
+
+	page = alloc_page(gfp);
+	if (!page)
+		return NULL;
+
+	*mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+	if (dma_mapping_error(dev, *mapping)) {
+		__free_page(page);
+		return NULL;
+	}
+	*mapping += bp->rx_dma_offset;
+	return page;
+}
+
 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
 				       gfp_t gfp)
 {
@@ -586,8 +601,8 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
 	if (!data)
 		return NULL;
 
-	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
-				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+	*mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
+				  bp->rx_buf_use_size, bp->rx_dir);
 
 	if (dma_mapping_error(&pdev->dev, *mapping)) {
 		kfree(data);
@@ -596,29 +611,37 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
 	return data;
 }
 
-static inline int bnxt_alloc_rx_data(struct bnxt *bp,
-				     struct bnxt_rx_ring_info *rxr,
-				     u16 prod, gfp_t gfp)
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+		       u16 prod, gfp_t gfp)
 {
 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
-	u8 *data;
 	dma_addr_t mapping;
 
-	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
-	if (!data)
-		return -ENOMEM;
+	if (BNXT_RX_PAGE_MODE(bp)) {
+		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
 
-	rx_buf->data = data;
-	dma_unmap_addr_set(rx_buf, mapping, mapping);
+		if (!page)
+			return -ENOMEM;
 
-	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+		rx_buf->data = page;
+		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+	} else {
+		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+
+		if (!data)
+			return -ENOMEM;
+
+		rx_buf->data = data;
+		rx_buf->data_ptr = data + bp->rx_offset;
+	}
+	rx_buf->mapping = mapping;
+
+	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
 	return 0;
 }
 
-static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
-			       u8 *data)
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
 {
 	u16 prod = rxr->rx_prod;
 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -628,9 +651,9 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
 	cons_rx_buf = &rxr->rx_buf_ring[cons];
 
 	prod_rx_buf->data = data;
+	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
 
-	dma_unmap_addr_set(prod_rx_buf, mapping,
-			   dma_unmap_addr(cons_rx_buf, mapping));
+	prod_rx_buf->mapping = cons_rx_buf->mapping;
 
 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
@@ -756,13 +779,60 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
 	rxr->rx_sw_agg_prod = sw_prod;
 }
 
+static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+					struct bnxt_rx_ring_info *rxr,
+					u16 cons, void *data, u8 *data_ptr,
+					dma_addr_t dma_addr,
+					unsigned int offset_and_len)
+{
+	unsigned int payload = offset_and_len >> 16;
+	unsigned int len = offset_and_len & 0xffff;
+	struct skb_frag_struct *frag;
+	struct page *page = data;
+	u16 prod = rxr->rx_prod;
+	struct sk_buff *skb;
+	int off, err;
+
+	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+	if (unlikely(err)) {
+		bnxt_reuse_rx_data(rxr, cons, data);
+		return NULL;
+	}
+	dma_addr -= bp->rx_dma_offset;
+	dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+
+	if (unlikely(!payload))
+		payload = eth_get_headlen(data_ptr, len);
+
+	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+	if (!skb) {
+		__free_page(page);
+		return NULL;
+	}
+
+	off = (void *)data_ptr - page_address(page);
+	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+	       payload + NET_IP_ALIGN);
+
+	frag = &skb_shinfo(skb)->frags[0];
+	skb_frag_size_sub(frag, payload);
+	frag->page_offset += payload;
+	skb->data_len -= payload;
+	skb->tail += payload;
+
+	return skb;
+}
+
 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 				   struct bnxt_rx_ring_info *rxr, u16 cons,
-				   u16 prod, u8 *data, dma_addr_t dma_addr,
-				   unsigned int len)
+				   void *data, u8 *data_ptr,
+				   dma_addr_t dma_addr,
+				   unsigned int offset_and_len)
 {
-	int err;
+	u16 prod = rxr->rx_prod;
 	struct sk_buff *skb;
+	int err;
 
 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
 	if (unlikely(err)) {
@@ -772,14 +842,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 
 	skb = build_skb(data, 0);
 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
-			 PCI_DMA_FROMDEVICE);
+			 bp->rx_dir);
 	if (!skb) {
 		kfree(data);
 		return NULL;
 	}
 
-	skb_reserve(skb, BNXT_RX_OFFSET);
-	skb_put(skb, len);
+	skb_reserve(skb, bp->rx_offset);
+	skb_put(skb, offset_and_len & 0xffff);
 	return skb;
 }
 
@@ -815,7 +885,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
 	 * a sw_prod index that equals the cons index, so we
 	 * need to clear the cons entry now.
	 */
-	mapping = dma_unmap_addr(cons_rx_buf, mapping);
+	mapping = cons_rx_buf->mapping;
 	page = cons_rx_buf->page;
 	cons_rx_buf->page = NULL;
@@ -878,14 +948,14 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
 	if (!skb)
 		return NULL;
 
-	dma_sync_single_for_cpu(&pdev->dev, mapping,
-				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+				bp->rx_dir);
 
-	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+	       len + NET_IP_ALIGN);
 
-	dma_sync_single_for_device(&pdev->dev, mapping,
-				   bp->rx_copy_thresh,
-				   PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+				   bp->rx_dir);
 
 	skb_put(skb, len);
 	return skb;
@@ -954,17 +1024,19 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	}
 
 	prod_rx_buf->data = tpa_info->data;
+	prod_rx_buf->data_ptr = tpa_info->data_ptr;
 
 	mapping = tpa_info->mapping;
-	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	prod_rx_buf->mapping = mapping;
 
 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 
 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
 
 	tpa_info->data = cons_rx_buf->data;
+	tpa_info->data_ptr = cons_rx_buf->data_ptr;
 	cons_rx_buf->data = NULL;
-	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+	tpa_info->mapping = cons_rx_buf->mapping;
 
 	tpa_info->len =
 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
@@ -1130,7 +1202,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
 		dev_kfree_skb_any(skb);
 		return NULL;
 	}
-	tcp_gro_complete(skb);
 
 	if (nw_off) { /* tunnel */
 		struct udphdr *uh = NULL;
@@ -1180,6 +1251,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
 			RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
 		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+	if (likely(skb))
+		tcp_gro_complete(skb);
 #endif
 	return skb;
 }
@@ -1189,17 +1262,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 					   u32 *raw_cons,
 					   struct rx_tpa_end_cmp *tpa_end,
 					   struct rx_tpa_end_cmp_ext *tpa_end1,
-					   bool *agg_event)
+					   u8 *event)
 {
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 	u8 agg_id = TPA_END_AGG_ID(tpa_end);
-	u8 *data, agg_bufs;
+	u8 *data_ptr, agg_bufs;
 	u16 cp_cons = RING_CMP(*raw_cons);
 	unsigned int len;
 	struct bnxt_tpa_info *tpa_info;
 	dma_addr_t mapping;
 	struct sk_buff *skb;
+	void *data;
 
 	if (unlikely(bnapi->in_reset)) {
 		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
@@ -1211,7 +1285,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 
 	tpa_info = &rxr->rx_tpa[agg_id];
 	data = tpa_info->data;
-	prefetch(data);
+	data_ptr = tpa_info->data_ptr;
+	prefetch(data_ptr);
 	len = tpa_info->len;
 	mapping = tpa_info->mapping;
 
@@ -1222,7 +1297,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
 			return ERR_PTR(-EBUSY);
 
-		*agg_event = true;
+		*event |= BNXT_AGG_EVENT;
 		cp_cons = NEXT_CMP(cp_cons);
 	}
 
@@ -1234,7 +1309,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 	}
 
 	if (len <= bp->rx_copy_thresh) {
-		skb = bnxt_copy_skb(bnapi, data, len, mapping);
+		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
 		if (!skb) {
 			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
 			return NULL;
@@ -1250,18 +1325,19 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		}
 
 		tpa_info->data = new_data;
+		tpa_info->data_ptr = new_data + bp->rx_offset;
 		tpa_info->mapping = new_mapping;
 
 		skb = build_skb(data, 0);
 		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
-				 PCI_DMA_FROMDEVICE);
+				 bp->rx_dir);
 
 		if (!skb) {
 			kfree(data);
 			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
 			return NULL;
 		}
-		skb_reserve(skb, BNXT_RX_OFFSET);
+		skb_reserve(skb, bp->rx_offset);
 		skb_put(skb, len);
 	}
@@ -1307,7 +1383,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
  * -EIO    - packet aborted due to hw error indicated in BD
  */
 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
-		       bool *agg_event)
+		       u8 *event)
 {
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
@@ -1318,10 +1394,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
 	struct bnxt_sw_rx_bd *rx_buf;
 	unsigned int len;
-	u8 *data, agg_bufs, cmp_type;
+	u8 *data_ptr, agg_bufs, cmp_type;
 	dma_addr_t dma_addr;
 	struct sk_buff *skb;
+	void *data;
 	int rc = 0;
+	u32 misc;
 
 	rxcmp = (struct rx_cmp *)
 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1342,13 +1420,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
 
+		*event |= BNXT_RX_EVENT;
 		goto next_rx_no_prod;
 
 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
 		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
 				   (struct rx_tpa_end_cmp *)rxcmp,
-				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
-				   agg_event);
+				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
 
 		if (unlikely(IS_ERR(skb)))
 			return -EBUSY;
@@ -1356,37 +1434,36 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 		rc = -ENOMEM;
 		if (likely(skb)) {
 			skb_record_rx_queue(skb, bnapi->index);
-			skb_mark_napi_id(skb, &bnapi->napi);
-			if (bnxt_busy_polling(bnapi))
-				netif_receive_skb(skb);
-			else
-				napi_gro_receive(&bnapi->napi, skb);
+			napi_gro_receive(&bnapi->napi, skb);
 			rc = 1;
 		}
+		*event |= BNXT_RX_EVENT;
 		goto next_rx_no_prod;
 	}
 
 	cons = rxcmp->rx_cmp_opaque;
 	rx_buf = &rxr->rx_buf_ring[cons];
 	data = rx_buf->data;
+	data_ptr = rx_buf->data_ptr;
 	if (unlikely(cons != rxr->rx_next_cons)) {
 		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
 
 		bnxt_sched_reset(bp, rxr);
 		return rc1;
 	}
-	prefetch(data);
+	prefetch(data_ptr);
 
-	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
-				RX_CMP_AGG_BUFS_SHIFT;
+	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
 
 	if (agg_bufs) {
 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
 			return -EBUSY;
 
 		cp_cons = NEXT_CMP(cp_cons);
-		*agg_event = true;
+		*event |= BNXT_AGG_EVENT;
 	}
+	*event |= BNXT_RX_EVENT;
 
 	rx_buf->data = NULL;
 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
@@ -1399,17 +1476,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	}
 
 	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
-	dma_addr = dma_unmap_addr(rx_buf, mapping);
+	dma_addr = rx_buf->mapping;
+
+	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
+		rc = 1;
+		goto next_rx;
+	}
 
 	if (len <= bp->rx_copy_thresh) {
-		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
 		bnxt_reuse_rx_data(rxr, cons, data);
 		if (!skb) {
 			rc = -ENOMEM;
 			goto next_rx;
 		}
 	} else {
-		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+		u32 payload;
+
+		if (rx_buf->data_ptr == data_ptr)
+			payload = misc & RX_CMP_PAYLOAD_OFFSET;
+		else
+			payload = 0;
+		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+				      payload | len);
 		if (!skb) {
 			rc = -ENOMEM;
 			goto next_rx;
@@ -1460,11 +1549,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	}
 
 	skb_record_rx_queue(skb, bnapi->index);
-	skb_mark_napi_id(skb, &bnapi->napi);
-	if (bnxt_busy_polling(bnapi))
-		netif_receive_skb(skb);
-	else
-		napi_gro_receive(&bnapi->napi, skb);
+	napi_gro_receive(&bnapi->napi, skb);
 	rc = 1;
 
 next_rx:
@@ -1637,8 +1722,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 	u32 cons;
 	int tx_pkts = 0;
 	int rx_pkts = 0;
-	bool rx_event = false;
-	bool agg_event = false;
+	u8 event = 0;
 	struct tx_cmp *txcmp;
 
 	while (1) {
@@ -1660,12 +1744,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 			if (unlikely(tx_pkts > bp->tx_wake_thresh))
 				rx_pkts = budget;
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
-			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
 			if (likely(rc >= 0))
 				rx_pkts += rc;
 			else if (rc == -EBUSY)	/* partial completion */
 				break;
-			rx_event = true;
 		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
 				     CMPL_BASE_TYPE_HWRM_DONE) ||
 				    (TX_CMP_TYPE(txcmp) ==
@@ -1680,6 +1763,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 			break;
 	}
 
+	if (event & BNXT_TX_EVENT) {
+		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+		void __iomem *db = txr->tx_doorbell;
+		u16 prod = txr->tx_prod;
+
+		/* Sync BD data before updating doorbell */
+		wmb();
+
+		writel(DB_KEY_TX | prod, db);
+		writel(DB_KEY_TX | prod, db);
+	}
+
 	cpr->cp_raw_cons = raw_cons;
 	/* ACK completion ring before freeing tx ring and producing new
 	 * buffers in rx/agg rings to prevent overflowing the completion
@@ -1688,14 +1783,14 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
 
 	if (tx_pkts)
-		bnxt_tx_int(bp, bnapi, tx_pkts);
+		bnapi->tx_int(bp, bnapi, tx_pkts);
 
-	if (rx_event) {
+	if (event & BNXT_RX_EVENT) {
 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 
 		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
 		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
-		if (agg_event) {
+		if (event & BNXT_AGG_EVENT) {
 			writel(DB_KEY_RX | rxr->rx_agg_prod,
 			       rxr->rx_agg_doorbell);
 			writel(DB_KEY_RX | rxr->rx_agg_prod,
@@ -1716,7 +1811,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
 	u32 cp_cons, tmp_raw_cons;
 	u32 raw_cons = cpr->cp_raw_cons;
 	u32 rx_pkts = 0;
-	bool agg_event = false;
+	u8 event = 0;
 
 	while (1) {
 		int rc;
@@ -1740,7 +1835,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
 
-			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
 			if (likely(rc == -EIO))
 				rx_pkts++;
 			else if (rc == -EBUSY)	/* partial completion */
@@ -1763,13 +1858,13 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
 
 	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
 	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
 
-	if (agg_event) {
+	if (event & BNXT_AGG_EVENT) {
 		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
 		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
 	}
 
 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
-		napi_complete(napi);
+		napi_complete_done(napi, rx_pkts);
 		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
 	}
 	return rx_pkts;
@@ -1782,9 +1877,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	int work_done = 0;
 
-	if (!bnxt_lock_napi(bnapi))
-		return budget;
-
 	while (1) {
 		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
@@ -1792,42 +1884,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 			break;
 
 		if (!bnxt_has_work(bp, cpr)) {
-			napi_complete(napi);
-			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+			if (napi_complete_done(napi, work_done))
+				BNXT_CP_DB_REARM(cpr->cp_doorbell,
+						 cpr->cp_raw_cons);
 			break;
 		}
 	}
 	mmiowb();
-	bnxt_unlock_napi(bnapi);
 	return work_done;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int bnxt_busy_poll(struct napi_struct *napi)
-{
-	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
-	struct bnxt *bp = bnapi->bp;
-	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-	int rx_work, budget = 4;
-
-	if (atomic_read(&bp->intr_sem) != 0)
-		return LL_FLUSH_FAILED;
-
-	if (!bp->link_info.link_up)
-		return LL_FLUSH_FAILED;
-
-	if (!bnxt_lock_poll(bnapi))
-		return LL_FLUSH_BUSY;
-
-	rx_work = bnxt_poll_work(bp, bnapi, budget);
-
-	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
-
-	bnxt_unlock_poll(bnapi);
-	return rx_work;
-}
-#endif
-
 static void bnxt_free_tx_skbs(struct bnxt *bp)
 {
 	int i, max_idx;
@@ -1905,11 +1971,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 				if (!data)
 					continue;
 
-				dma_unmap_single(
-					&pdev->dev,
-					dma_unmap_addr(tpa_info, mapping),
-					bp->rx_buf_use_size,
-					PCI_DMA_FROMDEVICE);
+				dma_unmap_single(&pdev->dev, tpa_info->mapping,
+						 bp->rx_buf_use_size,
+						 bp->rx_dir);
 
 				tpa_info->data = NULL;
 
@@ -1919,19 +1983,20 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
 		for (j = 0; j < max_idx; j++) {
 			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
-			u8 *data = rx_buf->data;
+			void *data = rx_buf->data;
 
 			if (!data)
 				continue;
 
-			dma_unmap_single(&pdev->dev,
-					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
-					 PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&pdev->dev, rx_buf->mapping,
					 bp->rx_buf_use_size, bp->rx_dir);
 
 			rx_buf->data = NULL;
 
-			kfree(data);
+			if (BNXT_RX_PAGE_MODE(bp))
+				__free_page(data);
+			else
+				kfree(data);
 		}
 
 		for (j = 0; j < max_agg_idx; j++) {
@@ -1942,8 +2007,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 			if (!page)
 				continue;
 
-			dma_unmap_page(&pdev->dev,
-				       dma_unmap_addr(rx_agg_buf, mapping),
+			dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
 				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
 			rx_agg_buf->page = NULL;
@@ -2034,6 +2098,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring;
 
+		if (rxr->xdp_prog)
+			bpf_prog_put(rxr->xdp_prog);
+
 		kfree(rxr->rx_tpa);
 		rxr->rx_tpa = NULL;
 
@@ -2172,6 +2239,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
 		}
 		ring->queue_id = bp->q_info[j].queue_id;
+		if (i < bp->tx_nr_rings_xdp)
+			continue;
 		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
 			j++;
 	}
@@ -2319,6 +2388,15 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	ring = &rxr->rx_ring_struct;
 	bnxt_init_rxbd_pages(ring, type);
 
+	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
+		if (IS_ERR(rxr->xdp_prog)) {
+			int rc = PTR_ERR(rxr->xdp_prog);
+
+			rxr->xdp_prog = NULL;
+			return rc;
+		}
+	}
 	prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
@@ -2365,6 +2443,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 				return -ENOMEM;
 
 			rxr->rx_tpa[i].data = data;
+			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
 			rxr->rx_tpa[i].mapping = mapping;
 		}
 	} else {
@@ -2380,6 +2459,14 @@ static int bnxt_init_rx_rings(struct bnxt *bp)
 {
 	int i, rc = 0;
 
+	if (BNXT_RX_PAGE_MODE(bp)) {
+		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
+	} else {
+		bp->rx_offset = BNXT_RX_OFFSET;
+		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+	}
+
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		rc = bnxt_init_one_rx_ring(bp, i);
 		if (rc)
@@ -2503,9 +2590,11 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
 	return pages;
 }
 
-static void bnxt_set_tpa_flags(struct bnxt *bp)
+void bnxt_set_tpa_flags(struct bnxt *bp)
 {
 	bp->flags &= ~BNXT_FLAG_TPA;
+	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+		return;
 	if (bp->dev->features & NETIF_F_LRO)
 		bp->flags |= BNXT_FLAG_LRO;
 	if (bp->dev->features & NETIF_F_GRO)
@@ -2535,7 +2624,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
 
 	bp->flags &= ~BNXT_FLAG_JUMBO;
-	if (rx_space > PAGE_SIZE) {
+	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
 		u32 jumbo_factor;
 
 		bp->flags |= BNXT_FLAG_JUMBO;
@@ -2587,6 +2676,27 @@ void bnxt_set_ring_params(struct bnxt *bp)
 	bp->cp_ring_mask = bp->cp_bit - 1;
 }
 
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+	if (page_mode) {
+		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
+			return -EOPNOTSUPP;
+		bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+		bp->dev->hw_features &= ~NETIF_F_LRO;
+		bp->dev->features &= ~NETIF_F_LRO;
+		bp->rx_dir = DMA_BIDIRECTIONAL;
+		bp->rx_skb_func = bnxt_rx_page_skb;
+	} else {
+		bp->dev->max_mtu = BNXT_MAX_MTU;
+		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+		bp->rx_dir = DMA_FROM_DEVICE;
+		bp->rx_skb_func = bnxt_rx_skb;
+	}
+	return 0;
+}
+
 static void bnxt_free_vnic_attributes(struct bnxt *bp)
 {
 	int i;
@@ -2669,6 +2779,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 			goto out;
 		}
 
+		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
+			continue;
+
 		/* Allocate rss table and hash key */
 		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 						     &vnic->rss_table_dma_addr,
@@ -2892,6 +3006,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
 	bnxt_free_stats(bp);
 	bnxt_free_ring_grps(bp);
 	bnxt_free_vnics(bp);
+	kfree(bp->tx_ring_map);
+	bp->tx_ring_map = NULL;
 	kfree(bp->tx_ring);
 	bp->tx_ring = NULL;
 	kfree(bp->rx_ring);
@@ -2944,6 +3060,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
 		if (!bp->tx_ring)
 			return -ENOMEM;
 
+		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
+					  GFP_KERNEL);
+
+		if (!bp->tx_ring_map)
+			return -ENOMEM;
+
 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
 			j = 0;
 		else
@@ -2952,6 +3074,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
 		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
 			bp->tx_ring[i].bnapi = bp->bnapi[j];
 			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
+			if (i >= bp->tx_nr_rings_xdp) {
+				bp->tx_ring[i].txq_index = i -
+					bp->tx_nr_rings_xdp;
+				bp->bnapi[j]->tx_int = bnxt_tx_int;
+			} else {
+				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
+				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+			}
 		}
 
 		rc = bnxt_alloc_stats(bp);
@@ -2993,6 +3124,47 @@ alloc_mem_err:
 	return rc;
 }
 
+static void bnxt_disable_int(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+		if (ring->fw_ring_id != INVALID_HW_RING_ID)
+			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+	}
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+	int i;
+
+	atomic_inc(&bp->intr_sem);
+
+	bnxt_disable_int(bp);
+	for (i = 0; i < bp->cp_nr_rings; i++)
+		synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+	int i;
+
+	atomic_set(&bp->intr_sem, 0);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+	}
+}
+
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
 			    u16 cmpl_ring, u16 target_id)
 {
@@ -3292,6 +3464,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
 
+#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
+		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
+
 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 					     struct bnxt_ntuple_filter *fltr)
 {
@@ -3312,10 +3487,31 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
 	req.ip_protocol = keys->basic.ip_proto;
 
-	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
-	req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
-	req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
-	req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
+		int i;
+
+		req.ethertype = htons(ETH_P_IPV6);
+		req.ip_addr_type =
+			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
+		*(struct in6_addr *)&req.src_ipaddr[0] =
+			keys->addrs.v6addrs.src;
+		*(struct in6_addr *)&req.dst_ipaddr[0] =
+			keys->addrs.v6addrs.dst;
+		for (i = 0; i < 4; i++) {
+			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+		}
+	} else {
+		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+	}
+	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
+		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+		req.tunnel_type =
+			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
+	}
 
 	req.src_port = keys->ports.src;
 	req.src_port_mask = cpu_to_be16(0xffff);
@@ -3562,6 +3758,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
 					   VNIC_CFG_REQ_ENABLES_MRU);
+	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
+		req.rss_rule =
+			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+					   VNIC_CFG_REQ_ENABLES_MRU);
+		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
 	} else {
 		req.rss_rule = cpu_to_le16(0xffff);
 	}
@@ -3665,6 +3867,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
 	return rc;
 }
 
+static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_vnic_qcaps_input req = {0};
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10600)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		if (resp->flags &
+		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
 {
 	u16 i;
@@ -3768,7 +3991,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
 		break;
 	case HWRM_RING_ALLOC_CMPL:
-		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
 		if (bp->flags & BNXT_FLAG_USING_MSIX)
 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
@@ -3787,7 +4010,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 
 	if (rc || err) {
 		switch (ring_type) {
-		case RING_FREE_REQ_RING_TYPE_CMPL:
+		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
 			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
 				   rc, err);
 			return -1;
@@ -3811,6 +4034,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 	return rc;
 }
 
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+	int rc;
+
+	if (BNXT_PF(bp)) {
+		struct hwrm_func_cfg_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+		req.fid = cpu_to_le16(0xffff);
+		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+		req.async_event_cr = cpu_to_le16(idx);
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	} else {
+		struct hwrm_func_vf_cfg_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+		req.enables =
+			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+		req.async_event_cr = cpu_to_le16(idx);
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	}
+	return rc;
+}
+
 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -3827,6 +4074,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 			goto err_out;
 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+		if (!i) {
+			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+			if (rc)
+				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+		}
 	}
 
 	for (i = 0; i < bp->tx_nr_rings; i++) {
@@ -3901,7 +4154,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
 
 	if (rc || error_code) {
 		switch (ring_type) {
-		case RING_FREE_REQ_RING_TYPE_CMPL:
+		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
 			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
 				   rc);
 			return rc;
@@ -3977,6 +4230,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 		}
 	}
 
+	/* The completion rings are about to be freed.  After that the
+	 * IRQ doorbell will not work anymore.  So we need to disable
+	 * IRQ here.
+	 */
+	bnxt_disable_int_sync(bp);
+
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -3984,7 +4243,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
 			hwrm_ring_free_send_msg(bp, ring,
-						RING_FREE_REQ_RING_TYPE_CMPL,
+						RING_FREE_REQ_RING_TYPE_L2_CMPL,
 						INVALID_HW_RING_ID);
 			ring->fw_ring_id = INVALID_HW_RING_ID;
 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -3992,6 +4251,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	}
 }
 
+/* Caller must hold bp->hwrm_cmd_lock */
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
+{
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcfg_input req = {0};
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+	req.fid = cpu_to_le16(fid);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+
+	return rc;
+}
+
+static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+{
+	struct hwrm_func_cfg_input req = {0};
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return 0;
+
+	if (BNXT_VF(bp))
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+	req.num_tx_rings = cpu_to_le16(*tx_rings);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return rc;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
 	u32 buf_tmrs, u16 flags,
 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
@@ -4249,7 +4552,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		/* overwrite netdev dev_adr with admin VF MAC */
 		memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
 	} else {
-		random_ether_addr(bp->dev->dev_addr);
+		eth_hw_addr_random(bp->dev);
 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
 	}
 	return rc;
@@ -4463,8 +4766,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
 
 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 {
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	int rc;
 
+	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
+		goto skip_rss_ctx;
+
 	/* allocate context for vnic */
 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
 	if (rc) {
@@ -4484,6 +4791,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 		bp->rsscos_nr_ctxs++;
 	}
 
+skip_rss_ctx:
 	/* configure default vnic, ring grp */
 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
 	if (rc) {
@@ -4518,13 +4826,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 	int i, rc = 0;
 
 	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_vnic_info *vnic;
 		u16 vnic_id = i + 1;
 		u16 ring_id = i;
 
 		if (vnic_id >= bp->nr_vnics)
 			break;
 
-		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+		vnic = &bp->vnic_info[vnic_id];
+		vnic->flags |= BNXT_VNIC_RFS_FLAG;
+		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
 		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
 		if (rc) {
 			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
@@ -4698,40 +5010,13 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
 	return bnxt_init_chip(bp, irq_re_init);
 }
 
-static void bnxt_disable_int(struct bnxt *bp)
-{
-	int i;
-
-	if (!bp->bnapi)
-		return;
-
-	for (i = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_napi *bnapi = bp->bnapi[i];
-		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
-		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
-	}
-}
-
-static void bnxt_enable_int(struct bnxt *bp)
-{
-	int i;
-
-	atomic_set(&bp->intr_sem, 0);
-	for (i = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_napi *bnapi = bp->bnapi[i];
-		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
-		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
-	}
-}
-
 static int bnxt_set_real_num_queues(struct bnxt *bp)
 {
 	int rc;
 	struct net_device *dev = bp->dev;
 
-	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+					  bp->tx_nr_rings_xdp);
 	if (rc)
 		return rc;
 
@@ -4779,19 +5064,12 @@ static void bnxt_setup_msix(struct bnxt *bp)
 
 	tcs = netdev_get_num_tc(dev);
 	if (tcs > 1) {
-		bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
-		if (bp->tx_nr_rings_per_tc == 0) {
-			netdev_reset_tc(dev);
-			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
-		} else {
-			int i, off, count;
+		int i, off, count;
 
-			bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
-			for (i = 0; i < tcs; i++) {
-				count = bp->tx_nr_rings_per_tc;
-				off = i * count;
-				netdev_set_tc_queue(dev, i, count, off);
-			}
+		for (i = 0; i < tcs; i++) {
+			count = bp->tx_nr_rings_per_tc;
+			off = i * count;
+			netdev_set_tc_queue(dev, i, count, off);
 		}
 	}
 
@@ -4836,6 +5114,26 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
 	return rc;
 }
 
+#ifdef CONFIG_RFS_ACCEL
+static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_rsscos_ctxs;
+#endif
+	return bp->pf.max_rsscos_ctxs;
+}
+
+static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_vnics;
+#endif
+	return bp->pf.max_vnics;
+}
+#endif
+
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
 {
 #if defined(CONFIG_BNXT_SRIOV)
@@ -5094,10 +5392,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
 	if (!bp->bnapi)
 		return;
 
-	for (i = 0; i < bp->cp_nr_rings; i++) {
+	for (i = 0; i < bp->cp_nr_rings; i++)
 		napi_disable(&bp->bnapi[i]->napi);
-		bnxt_disable_poll(bp->bnapi[i]);
-	}
 }
 
 static void bnxt_enable_napi(struct bnxt *bp)
@@ -5106,7 +5402,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		bp->bnapi[i]->in_reset = false;
-		bnxt_enable_poll(bp->bnapi[i]);
 		napi_enable(&bp->bnapi[i]->napi);
 	}
 }
@@ -5150,7 +5445,7 @@ static void bnxt_report_link(struct bnxt *bp)
 	if (bp->link_info.link_up) {
 		const char *duplex;
 		const char *flow_ctrl;
-		u16 speed;
+		u16 speed, fec;
 
 		netif_carrier_on(bp->dev);
 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5172,6 +5467,12 @@ static void bnxt_report_link(struct bnxt *bp)
 
 		netdev_info(bp->dev, "EEE is %s\n",
 			    bp->eee.eee_active ? "active" : "not active");
+		fec = bp->link_info.fec_cfg;
+		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
+			netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
+				    (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
+				    (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
"RS" : "None"); } else { netif_carrier_off(bp->dev); netdev_err(bp->dev, "NIC Link is Down\n"); @@ -5296,6 +5597,11 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) } } } + + link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; + if (bp->hwrm_spec_code >= 0x10504) + link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); + /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == BNXT_LINK_LINK) @@ -5384,7 +5690,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, { u8 autoneg = bp->link_info.autoneg; u16 fw_link_speed = bp->link_info.req_link_speed; - u32 advertising = bp->link_info.advertising; + u16 advertising = bp->link_info.advertising; if (autoneg & BNXT_AUTONEG_SPEED) { req->auto_mode |= @@ -5489,6 +5795,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) +{ + struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_led_qcaps_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { + int i; + + bp->num_leds = resp->num_leds; + memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * + bp->num_leds); + for (i = 0; i < bp->num_leds; i++) { + struct bnxt_led_info *led = &bp->leds[i]; + __le16 caps = led->led_state_caps; + + if (!led->led_group_id || + !BNXT_LED_ALT_BLINK_CAP(caps)) { + bp->num_leds = 0; + break; + } + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -5527,6 +5872,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp) rc); return rc; } + if (!BNXT_SINGLE_PF(bp)) + return 0; + if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != link_info->req_flow_ctrl) @@ -5678,19 +6026,6 @@ static int bnxt_open(struct net_device *dev) return __bnxt_open_nic(bp, true, true); } -static void bnxt_disable_int_sync(struct bnxt *bp) -{ - int i; - - atomic_inc(&bp->intr_sem); - if (!netif_running(bp->dev)) - return; - - bnxt_disable_int(bp); - for (i = 0; i < bp->cp_nr_rings; i++) - synchronize_irq(bp->irq_tbl[i].vector); -} - int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -5712,13 +6047,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) msleep(20); - /* Flush rings before disabling interrupts */ + /* Flush rings and and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ bnxt_disable_napi(bp); - bnxt_disable_int_sync(bp); del_timer_sync(&bp->timer); bnxt_free_skbs(bp); @@ -5765,16 +6099,14 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static struct rtnl_link_stats64 * +static void bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { u32 i; struct bnxt *bp = netdev_priv(dev); - memset(stats, 0, sizeof(struct rtnl_link_stats64)); - 
 	if (!bp->bnapi)
-		return stats;
+		return;
 
 	/* TODO check if we need to synchronize with bnxt_close path */
 	for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -5821,8 +6153,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
 		stats->tx_errors = le64_to_cpu(tx->tx_err);
 	}
-
-	return stats;
 }
 
 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
@@ -5975,20 +6305,36 @@ skip_uc:
 	return rc;
 }
 
+/* If the chip and firmware supports RFS */
+static bool bnxt_rfs_supported(struct bnxt *bp)
+{
+	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+		return true;
+	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+		return true;
+	return false;
+}
+
+/* If runtime conditions support RFS */
 static bool bnxt_rfs_capable(struct bnxt *bp)
 {
 #ifdef CONFIG_RFS_ACCEL
-	struct bnxt_pf_info *pf = &bp->pf;
-	int vnics;
+	int vnics, max_vnics, max_rss_ctxs;
 
-	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
+	if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
 		return false;
 
 	vnics = 1 + bp->rx_nr_rings;
-	if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
+	max_vnics = bnxt_get_max_func_vnics(bp);
+	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
+
+	/* RSS contexts not a limiting factor */
+	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+		max_rss_ctxs = max_vnics;
+	if (vnics > max_vnics || vnics > max_rss_ctxs) {
 		netdev_warn(bp->dev,
 			    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
-			    min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
+			    min(max_rss_ctxs - 1, max_vnics - 1));
 		return false;
 	}
 
@@ -6044,6 +6390,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 	if (features & NETIF_F_LRO)
 		flags |= BNXT_FLAG_LRO;
 
+	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+		flags &= ~BNXT_FLAG_TPA;
+
 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 		flags |= BNXT_FLAG_STRIP_VLAN;
 
@@ -6296,6 +6645,62 @@ static void bnxt_sp_task(struct work_struct *work)
 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }
 
+/* Under rtnl_lock */
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
+{
+	int max_rx, max_tx, tx_sets = 1;
+	int tx_rings_needed;
+	bool sh = true;
+	int rc;
+
+	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+		sh = false;
+
+	if (tcs)
+		tx_sets = tcs;
+
+	rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
+	if (rc)
+		return rc;
+
+	if (max_rx < rx)
+		return -ENOMEM;
+
+	tx_rings_needed = tx * tx_sets + tx_xdp;
+	if (max_tx < tx_rings_needed)
+		return -ENOMEM;
+
+	if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
+	    tx_rings_needed < (tx * tx_sets + tx_xdp))
+		return -ENOMEM;
+	return 0;
+}
+
+static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
+{
+	if (bp->bar2) {
+		pci_iounmap(pdev, bp->bar2);
+		bp->bar2 = NULL;
+	}
+
+	if (bp->bar1) {
+		pci_iounmap(pdev, bp->bar1);
+		bp->bar1 = NULL;
+	}
+
+	if (bp->bar0) {
+		pci_iounmap(pdev, bp->bar0);
+		bp->bar0 = NULL;
+	}
+}
+
+static void bnxt_cleanup_pci(struct bnxt *bp)
+{
+	bnxt_unmap_bars(bp, bp->pdev);
+	pci_release_regions(bp->pdev);
+	pci_disable_device(bp->pdev);
+}
+
 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
 	int rc;
@@ -6383,25 +6788,10 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->current_interval = BNXT_TIMER_INTERVAL;
 
 	clear_bit(BNXT_STATE_OPEN, &bp->state);
-
 	return 0;
 
 init_err_release:
-	if (bp->bar2) {
-		pci_iounmap(pdev, bp->bar2);
-		bp->bar2 = NULL;
-	}
-
-	if (bp->bar1) {
-		pci_iounmap(pdev, bp->bar1);
-		bp->bar1 = NULL;
-	}
-
-	if (bp->bar0) {
-		pci_iounmap(pdev, bp->bar0);
-		bp->bar0 = NULL;
-	}
-
+	bnxt_unmap_bars(bp, pdev);
 	pci_release_regions(pdev);
 
 init_err_disable:
@@ -6458,9 +6848,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	bool sh = false;
+	int rc;
 
 	if (tc > bp->max_tc) {
-		netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
 			   tc, bp->max_tc);
 		return -EINVAL;
 	}
@@ -6471,13 +6862,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
 		sh = true;
 
-	if (tc) {
-		int max_rx_rings, max_tx_rings, rc;
-
-		rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
-		if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
-			return -ENOMEM;
-	}
+	rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+				tc, bp->tx_nr_rings_xdp);
+	if (rc)
+		return rc;
 
 	/* Needs to close the device and do hw resource re-allocations */
 	if (netif_running(bp->dev))
@@ -6521,6 +6909,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
 	    keys1->ports.ports == keys2->ports.ports &&
 	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
 	    keys1->basic.n_proto == keys2->basic.n_proto &&
+	    keys1->control.flags == keys2->control.flags &&
 	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
 	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
 		return true;
@@ -6538,9 +6927,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	int rc = 0, idx, bit_id, l2_idx = 0;
 	struct hlist_head *head;
 
-	if (skb->encapsulation)
-		return -EPROTONOSUPPORT;
-
 	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
 		int off = 0, j;
@@ -6567,12 +6953,23 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		goto err_free;
 	}
 
-	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
+	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
 		rc = -EPROTONOSUPPORT;
 		goto err_free;
 	}
+	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
+	    bp->hwrm_spec_code < 0x10601) {
+		rc = -EPROTONOSUPPORT;
+		goto err_free;
+	}
+	if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
+	    bp->hwrm_spec_code < 0x10601) {
+		rc = -EPROTONOSUPPORT;
+		goto err_free;
+	}
 
 	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
 	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
@@ -6779,9 +7176,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
 #endif
 	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
 	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	.ndo_busy_poll		= bnxt_busy_poll,
-#endif
+	.ndo_xdp		= bnxt_xdp,
 };
 
 static void bnxt_remove_one(struct pci_dev *pdev)
@@ -6801,15 +7196,12 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_hwrm_func_drv_unrgtr(bp);
 	bnxt_free_hwrm_resources(bp);
 	bnxt_dcb_free(bp);
-	pci_iounmap(pdev, bp->bar2);
-	pci_iounmap(pdev, bp->bar1);
-	pci_iounmap(pdev, bp->bar0);
 	kfree(bp->edev);
 	bp->edev = NULL;
+	if (bp->xdp_prog)
+		bpf_prog_put(bp->xdp_prog);
+	bnxt_cleanup_pci(bp);
 	free_netdev(dev);
-
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
 }
 
 static int bnxt_probe_phy(struct bnxt *bp)
@@ -6920,8 +7312,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	int rc;
 
 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
-	if (rc)
-		return rc;
+	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
+		/* Not enough rings, try disabling agg rings. */
+		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+		if (rc)
+			return rc;
+		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+		bp->dev->hw_features &= ~NETIF_F_LRO;
+		bp->dev->features &= ~NETIF_F_LRO;
+		bnxt_set_ring_params(bp);
+	}
 
 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
 		int max_cp, max_stat, max_irq;
@@ -6960,6 +7361,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
 		return rc;
 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+
+	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+	if (rc)
+		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+
 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
 	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
 			       bp->tx_nr_rings + bp->rx_nr_rings;
@@ -7001,7 +7407,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct bnxt *bp;
 	int rc, max_irqs;
 
-	if (pdev->device == 0x16cd && pci_is_bridge(pdev))
+	if (pci_is_bridge(pdev))
 		return -ENODEV;
 
 	if (version_printed++ == 0)
@@ -7027,17 +7433,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->netdev_ops = &bnxt_netdev_ops;
 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
 	dev->ethtool_ops = &bnxt_ethtool_ops;
-
 	pci_set_drvdata(pdev, dev);
 
 	rc = bnxt_alloc_hwrm_resources(bp);
 	if (rc)
-		goto init_err;
+		goto init_err_pci_clean;
 
 	mutex_init(&bp->hwrm_cmd_lock);
 	rc = bnxt_hwrm_ver_get(bp);
 	if (rc)
-		goto init_err;
+		goto init_err_pci_clean;
 
 	bnxt_hwrm_fw_set_time(bp);
 
@@ -7068,7 +7473,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* MTU range: 60 - 9500 */
 	dev->min_mtu = ETH_ZLEN;
-	dev->max_mtu = 9500;
+	dev->max_mtu = BNXT_MAX_MTU;
 
 	bnxt_dcb_init(bp);
 
@@ -7081,11 +7486,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	rc = bnxt_hwrm_func_drv_rgtr(bp);
 	if (rc)
-		goto init_err;
+		goto init_err_pci_clean;
 
 	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
 	if (rc)
-		goto init_err;
+		goto init_err_pci_clean;
 
 	bp->ulp_probe = bnxt_ulp_probe;
 
@@ -7095,7 +7500,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
 			   rc);
 		rc = -1;
-		goto init_err;
+		goto init_err_pci_clean;
 	}
 
 	rc = bnxt_hwrm_queue_qportcfg(bp);
@@ -7103,15 +7508,22 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
 			   rc);
 		rc = -1;
-		goto init_err;
+		goto init_err_pci_clean;
 	}
 
 	bnxt_hwrm_func_qcfg(bp);
+	bnxt_hwrm_port_led_qcaps(bp);
 
+	bnxt_set_rx_skb_mode(bp, false);
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
 	bnxt_set_max_func_irqs(bp, max_irqs);
-	bnxt_set_dflt_rings(bp);
+	rc = bnxt_set_dflt_rings(bp);
+	if (rc) {
+		netdev_err(bp->dev, "Not enough rings available.\n");
+		rc = -ENOMEM;
+		goto init_err_pci_clean;
+	}
 
 	/* Default RSS hash cfg. */
 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
@@ -7126,7 +7538,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
 	}
 
-	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+	bnxt_hwrm_vnic_qcaps(bp);
+	if (bnxt_rfs_supported(bp)) {
 		dev->hw_features |= NETIF_F_NTUPLE;
 		if (bnxt_rfs_capable(bp)) {
 			bp->flags |= BNXT_FLAG_RFS;
@@ -7139,15 +7552,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	rc = bnxt_probe_phy(bp);
 	if (rc)
-		goto init_err;
+		goto init_err_pci_clean;
 
 	rc = bnxt_hwrm_func_reset(bp);
 	if (rc)
-		goto init_err;
+		goto init_err_pci_clean;
 
 	rc = bnxt_init_int_mode(bp);
 	if (rc)
-		goto init_err;
+		goto init_err_pci_clean;
 
 	rc = register_netdev(dev);
 	if (rc)
@@ -7164,10 +7577,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 init_err_clr_int:
 	bnxt_clear_int_mode(bp);
-init_err:
-	pci_iounmap(pdev, bp->bar0);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
+init_err_pci_clean:
+	bnxt_cleanup_pci(bp);
 
 init_err_free:
 	free_netdev(dev);