Diffstat (limited to 'drivers/net/ethernet')
569 files changed, 47403 insertions, 14277 deletions
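A large share of the driver changes below follow the same NAPI conversion: poll handlers report the amount of work actually done via napi_complete_done() instead of napi_complete()/__napi_complete(), and only re-enable the device interrupt when NAPI really completed. The sketch below is a driver-agnostic illustration of that pattern, not code from any driver in this diff; the foo_* names and helpers are placeholders.

/* Hypothetical poll handler showing the napi_complete_done() pattern
 * used throughout this series. foo_clean_rx() and foo_enable_irq() stand
 * in for driver-specific RX cleanup and interrupt-unmask helpers.
 */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done;

	/* Process up to 'budget' received packets. */
	work_done = foo_clean_rx(priv, budget);

	/* Report the real work done. napi_complete_done() returns true only
	 * when NAPI is actually finished (e.g. not kept in busy-poll mode),
	 * and only then should the device interrupt be unmasked again.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_enable_irq(priv);

	return work_done;
}

The same series also converts ndo_get_stats64 callbacks to a void return (the core now ignores the returned pointer) and replaces per-driver MSI/MSI-X bookkeeping with pci_alloc_irq_vectors()/pci_irq_vector(), as visible in the slicoss, ena, xgene and xgbe hunks that follow.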
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 9fe3990319ec..084a6d58543a 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1753,7 +1753,7 @@ typhoon_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); iowrite32(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(tp->ioaddr); @@ -2370,9 +2370,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * 4) Get the hardware address. * 5) Put the card to sleep. */ - if (typhoon_reset(ioaddr, WaitSleep) < 0) { + err = typhoon_reset(ioaddr, WaitSleep); + if (err < 0) { err_msg = "could not reset 3XP"; - err = -EIO; goto error_out_dma; } @@ -2386,24 +2386,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) typhoon_init_interface(tp); typhoon_init_rings(tp); - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST); + if (err < 0) { err_msg = "cannot boot 3XP sleep image"; - err = -EIO; goto error_out_reset; } INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp); + if (err < 0) { err_msg = "cannot read MAC address"; - err = -EIO; goto error_out_reset; } *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); - if(!is_valid_ether_addr(dev->dev_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { err_msg = "Could not obtain valid ethernet address, aborting"; + err = -EIO; goto error_out_reset; } @@ -2411,7 +2412,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * later when we print out the version reported. 
*/ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp); + if (err < 0) { err_msg = "Could not get Sleep Image version"; goto error_out_reset; } @@ -2428,9 +2430,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if(xp_resp[0].numDesc != 0) tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { + err = typhoon_sleep(tp, PCI_D3hot, 0); + if (err < 0) { err_msg = "cannot put adapter to sleep"; - err = -EIO; goto error_out_reset; } @@ -2453,7 +2455,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; - if(register_netdev(dev) < 0) { + err = register_netdev(dev); + if (err < 0) { err_msg = "unable to register netdev"; goto error_out_reset; } diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index e4c28fed61d5..8c08f9deef92 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" +source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/aurora/Kconfig" @@ -170,7 +171,6 @@ source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" -source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/tile/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 24330f4885a9..26dce5bf2c18 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ +obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ @@ -81,7 +82,6 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ -obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 88164529b52a..a9ac58c351a0 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1206,7 +1206,7 @@ static void bfin_mac_rx(struct bfin_mac_local *lp) /* reserve 2 bytes for RXDWA padding */ skb_reserve(new_skb, NET_IP_ALIGN); /* Invalidate the data cache of skb->data range when it is write back - * cache. It will prevent overwritting the new data from DMA + * cache. 
It will prevent overwriting the new data from DMA */ blackfin_dcache_invalidate_range((unsigned long)new_skb->head, (unsigned long)new_skb->end); @@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_struct *napi, int budget) } if (i < budget) { - napi_complete(napi); + napi_complete_done(napi, i); if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags)) enable_irq(IRQ_MAC_RX); } diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 93def92f9997..9f7422ada704 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1008,7 +1008,7 @@ restart_txrx_poll: spin_unlock_irqrestore(&greth->devlock, flags); goto restart_txrx_poll; } else { - __napi_complete(napi); + napi_complete_done(napi, work_done); spin_unlock_irqrestore(&greth->devlock, flags); } } diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 831bab352f8e..87a11b9f0ea5 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3575,7 +3575,7 @@ static int et131x_poll(struct napi_struct *napi, int budget) et131x_handle_send_pkts(adapter); if (work_done < budget) { - napi_complete(&adapter->napi); + napi_complete_done(&adapter->napi, work_done); et131x_enable_interrupts(adapter); } diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index b21d8aa8d653..15a8096c60df 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1471,8 +1471,8 @@ drop_skb: return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *lst) +static void slic_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *lst) { struct slic_device *sdev = netdev_priv(dev); struct slic_stats *stats = &sdev->stats; @@ -1489,8 +1489,6 @@ static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev, SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc); SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802); SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier); - - return lst; } static int slic_get_sset_count(struct net_device *dev, int sset) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 25864bff25ee..527908c7e384 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -513,7 +513,7 @@ static int tse_poll(struct napi_struct *napi, int budget) if (rxcomplete < budget) { - napi_complete(napi); + napi_complete_done(napi, rxcomplete); netdev_dbg(priv->dev, "NAPI Complete, did %d packets with budget %d\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index a46e749bf226..5b6509d59716 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -631,22 +631,22 @@ enum ena_admin_flow_hash_proto { /* RSS flow hash fields */ enum ena_admin_flow_hash_fields { /* Ethernet Dest Addr */ - ENA_ADMIN_RSS_L2_DA = 0, + ENA_ADMIN_RSS_L2_DA = BIT(0), /* Ethernet Src Addr */ - ENA_ADMIN_RSS_L2_SA = 1, + ENA_ADMIN_RSS_L2_SA = BIT(1), /* ipv4/6 Dest Addr */ - ENA_ADMIN_RSS_L3_DA = 2, + ENA_ADMIN_RSS_L3_DA = BIT(2), /* ipv4/6 Src Addr */ - ENA_ADMIN_RSS_L3_SA = 5, + ENA_ADMIN_RSS_L3_SA = BIT(3), /* tcp/udp Dest Port */ - ENA_ADMIN_RSS_L4_DP = 6, + ENA_ADMIN_RSS_L4_DP = BIT(4), /* tcp/udp 
Src Port */ - ENA_ADMIN_RSS_L4_SP = 7, + ENA_ADMIN_RSS_L4_SP = BIT(5), }; struct ena_admin_proto_input { @@ -873,6 +873,14 @@ struct ena_admin_aenq_link_change_desc { u32 flags; }; +struct ena_admin_aenq_keep_alive_desc { + struct ena_admin_aenq_common_desc aenq_common_desc; + + u32 rx_drops_low; + + u32 rx_drops_high; +}; + struct ena_admin_ena_mmio_req_read_less_resp { u16 req_id; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 3066d9c99984..08d11cede9c9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -36,9 +36,9 @@ /*****************************************************************************/ /* Timeout in micro-sec */ -#define ADMIN_CMD_TIMEOUT_US (1000000) +#define ADMIN_CMD_TIMEOUT_US (3000000) -#define ENA_ASYNC_QUEUE_DEPTH 4 +#define ENA_ASYNC_QUEUE_DEPTH 16 #define ENA_ADMIN_QUEUE_DEPTH 32 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ @@ -784,7 +784,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, int ret; if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { - pr_info("Feature %d isn't supported\n", feature_id); + pr_debug("Feature %d isn't supported\n", feature_id); return -EPERM; } @@ -1126,7 +1126,13 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, comp, comp_size); if (unlikely(IS_ERR(comp_ctx))) { - pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx)); + if (comp_ctx == ERR_PTR(-ENODEV)) + pr_debug("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + else + pr_err("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + return PTR_ERR(comp_ctx); } @@ -1895,7 +1901,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { - pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU); + pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); return -EPERM; } @@ -1948,8 +1954,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) { - pr_info("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_FUNCTION); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); return -EPERM; } @@ -2112,7 +2118,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { - pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_INPUT); return -EPERM; } @@ -2184,7 +2191,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; - hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { @@ -2270,8 +2277,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id( ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { - pr_info("Feature %d isn't supported\n", - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); return -EPERM; } @@ -2444,11 +2451,9 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) int ret; - if 
(!ena_com_check_supported_feature_id(ena_dev, - ENA_ADMIN_HOST_ATTR_CONFIG)) { - pr_warn("Set host attribute isn't supported\n"); - return -EPERM; - } + /* Host attribute config is called before ena_com_get_dev_attr_feat + * so ena_com can't check if the feature is supported. + */ memset(&cmd, 0x0, sizeof(cmd)); admin_queue = &ena_dev->admin_queue; @@ -2542,8 +2547,8 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) if (rc) { if (rc == -EPERM) { - pr_info("Feature %d isn't supported\n", - ENA_ADMIN_INTERRUPT_MODERATION); + pr_debug("Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 509d7b8e15ab..c9b33ee5f258 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -33,6 +33,7 @@ #ifndef ENA_COM #define ENA_COM +#include <linux/compiler.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 539c536464a5..f999305e1363 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -45,7 +45,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + (head_masked * io_cq->cdesc_entry_size_in_bytes)); - desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; if (desc_phase != expected_phase) @@ -141,7 +141,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, ena_com_cq_inc_head(io_cq); count++; - last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } while (!last); @@ -489,13 +489,13 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) * expected, it mean that the device still didn't update * this completion. 
*/ - cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; if (cdesc_phase != expected_phase) return -EAGAIN; ena_com_cq_inc_head(io_cq); - *req_id = cdesc->req_id; + *req_id = READ_ONCE(cdesc->req_id); return 0; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index cc8b13ebfa75..35f19430c84a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -80,14 +80,18 @@ static void ena_tx_timeout(struct net_device *dev) { struct ena_adapter *adapter = netdev_priv(dev); + /* Change the state of the device to trigger reset + * Check that we are not in the middle or a trigger already + */ + + if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + u64_stats_update_begin(&adapter->syncp); adapter->dev_stats.tx_timeout++; u64_stats_update_end(&adapter->syncp); netif_err(adapter, tx_err, dev, "Transmit time out\n"); - - /* Change the state of the device to trigger reset */ - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu) @@ -559,6 +563,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter) */ static void ena_free_tx_bufs(struct ena_ring *tx_ring) { + bool print_once = true; u32 i; for (i = 0; i < tx_ring->ring_size; i++) { @@ -570,9 +575,16 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) if (!tx_info->skb) continue; - netdev_notice(tx_ring->netdev, - "free uncompleted tx skb qid %d idx 0x%x\n", - tx_ring->qid, i); + if (print_once) { + netdev_notice(tx_ring->netdev, + "free uncompleted tx skb qid %d idx 0x%x\n", + tx_ring->qid, i); + print_once = false; + } else { + netdev_dbg(tx_ring->netdev, + "free uncompleted tx skb qid %d idx 0x%x\n", + tx_ring->qid, i); + } ena_buf = tx_info->bufs; dma_unmap_single(tx_ring->dev, @@ -1109,7 +1121,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget) tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; - if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { + if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || + test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { napi_complete_done(napi, 0); return 0; } @@ -1117,26 +1130,40 @@ static int ena_io_poll(struct napi_struct *napi, int budget) tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); - if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { - napi_complete_done(napi, rx_work_done); + /* If the device is about to reset or down, avoid unmask + * the interrupt and return 0 so NAPI won't reschedule + */ + if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || + test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { + napi_complete_done(napi, 0); + ret = 0; + } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { napi_comp_call = 1; - /* Tx and Rx share the same interrupt vector */ - if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) - ena_adjust_intr_moderation(rx_ring, tx_ring); - /* Update intr register: rx intr delay, tx intr delay and - * interrupt unmask + /* Update numa and unmask the interrupt only when schedule + * from the interrupt context (vs from sk_busy_loop) */ - ena_com_update_intr_reg(&intr_reg, - rx_ring->smoothed_interval, - tx_ring->smoothed_interval, - true); + if (napi_complete_done(napi, rx_work_done)) { + /* Tx and Rx share the same 
interrupt vector */ + if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) + ena_adjust_intr_moderation(rx_ring, tx_ring); + + /* Update intr register: rx intr delay, + * tx intr delay and interrupt unmask + */ + ena_com_update_intr_reg(&intr_reg, + rx_ring->smoothed_interval, + tx_ring->smoothed_interval, + true); + + /* It is a shared MSI-X. + * Tx and Rx CQ have pointer to it. + * So we use one of them to reach the intr reg + */ + ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); + } - /* It is a shared MSI-X. Tx and Rx CQ have pointer to it. - * So we use one of them to reach the intr reg - */ - ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); ena_update_ring_numa_node(tx_ring, rx_ring); @@ -1698,12 +1725,22 @@ static void ena_down(struct ena_adapter *adapter) adapter->dev_stats.interface_down++; u64_stats_update_end(&adapter->syncp); - /* After this point the napi handler won't enable the tx queue */ - ena_napi_disable_all(adapter); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); + /* After this point the napi handler won't enable the tx queue */ + ena_napi_disable_all(adapter); + /* After destroy the queue there won't be any new interrupts */ + + if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { + int rc; + + rc = ena_com_dev_reset(adapter->ena_dev); + if (rc) + dev_err(&adapter->pdev->dev, "Device reset failed\n"); + } + ena_destroy_all_io_queues(adapter); ena_disable_io_intr_sync(adapter); @@ -2065,6 +2102,14 @@ static void ena_netpoll(struct net_device *netdev) struct ena_adapter *adapter = netdev_priv(netdev); int i; + /* Dont schedule NAPI if the driver is in the middle of reset + * or netdev is down. + */ + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) || + test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + for (i = 0; i < adapter->num_queues; i++) napi_schedule(&adapter->ena_napi[i].napi); } @@ -2165,32 +2210,50 @@ err: ena_com_delete_debug_area(adapter->ena_dev); } -static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void ena_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ena_adapter *adapter = netdev_priv(netdev); - struct ena_admin_basic_stats ena_stats; - int rc; + struct ena_ring *rx_ring, *tx_ring; + unsigned int start; + u64 rx_drops; + int i; if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) - return NULL; + return; - rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats); - if (rc) - return NULL; + for (i = 0; i < adapter->num_queues; i++) { + u64 bytes, packets; - stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) | - ena_stats.tx_bytes_low; - stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) | - ena_stats.rx_bytes_low; + tx_ring = &adapter->tx_ring[i]; - stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) | - ena_stats.rx_pkts_low; - stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) | - ena_stats.tx_pkts_low; + do { + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); + packets = tx_ring->tx_stats.cnt; + bytes = tx_ring->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); - stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) | - ena_stats.rx_drops_low; + stats->tx_packets += packets; + stats->tx_bytes += bytes; + + rx_ring = &adapter->rx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + packets = rx_ring->rx_stats.cnt; + bytes = rx_ring->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 
+ + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + do { + start = u64_stats_fetch_begin_irq(&adapter->syncp); + rx_drops = adapter->dev_stats.rx_drops; + } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); + + stats->rx_dropped = rx_drops; stats->multicast = 0; stats->collisions = 0; @@ -2204,8 +2267,6 @@ static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, stats->rx_errors = 0; stats->tx_errors = 0; - - return stats; } static const struct net_device_ops ena_netdev_ops = { @@ -2353,6 +2414,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, */ ena_com_set_admin_polling_mode(ena_dev, true); + ena_config_host_info(ena_dev); + /* Get Device Attributes*/ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); if (rc) { @@ -2377,11 +2440,10 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); - ena_config_host_info(ena_dev); - return 0; err_admin_init: + ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_mmio_read_less: ena_com_mmio_reg_read_request_destroy(ena_dev); @@ -2433,6 +2495,14 @@ static void ena_fw_reset_device(struct work_struct *work) bool dev_up, wd_state; int rc; + if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + dev_err(&pdev->dev, + "device reset schedule while reset bit is off\n"); + return; + } + + netif_carrier_off(netdev); + del_timer_sync(&adapter->timer_service); rtnl_lock(); @@ -2446,12 +2516,6 @@ static void ena_fw_reset_device(struct work_struct *work) */ ena_close(netdev); - rc = ena_com_dev_reset(ena_dev); - if (rc) { - dev_err(&pdev->dev, "Device reset failed\n"); - goto err; - } - ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); @@ -2464,6 +2528,8 @@ static void ena_fw_reset_device(struct work_struct *work) ena_com_mmio_reg_read_request_destroy(ena_dev); + clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + /* Finish with the destroy part. Start the init part */ rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); @@ -2509,6 +2575,8 @@ err_device_destroy: err: rtnl_unlock(); + clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + dev_err(&pdev->dev, "Reset attempt failed. 
Can not reset the device\n"); } @@ -2527,6 +2595,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) return; + if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + budget = ENA_MONITORED_TX_QUEUES; for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { @@ -2626,7 +2697,7 @@ static void ena_timer_service(unsigned long data) if (host_info) ena_update_host_info(host_info, adapter->netdev); - if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { netif_err(adapter, drv, adapter->netdev, "Trigger reset is on\n"); ena_dump_stats_to_dmesg(adapter); @@ -2660,7 +2731,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, io_sq_num = get_feat_ctx->max_queues.max_sq_num; } - io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES); + io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); io_queue_num = min_t(int, io_queue_num, io_sq_num); io_queue_num = min_t(int, io_queue_num, get_feat_ctx->max_queues.max_cq_num); @@ -2722,7 +2793,6 @@ static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, netdev->features = dev_features | NETIF_F_SG | - NETIF_F_NTUPLE | NETIF_F_RXHASH | NETIF_F_HIGHDMA; @@ -3093,12 +3163,6 @@ static void ena_remove(struct pci_dev *pdev) struct ena_com_dev *ena_dev; struct net_device *netdev; - if (!adapter) - /* This device didn't load properly and it's resources - * already released, nothing to do - */ - return; - ena_dev = adapter->ena_dev; netdev = adapter->netdev; @@ -3118,7 +3182,9 @@ static void ena_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->resume_io_task); - ena_com_dev_reset(ena_dev); + /* Reset the device only if the device is running. */ + if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) + ena_com_dev_reset(ena_dev); ena_free_mgmnt_irq(adapter); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 69d7e9ed5bc8..ed62d8e231a1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -44,7 +44,7 @@ #include "ena_eth_com.h" #define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_MINOR 1 #define DRV_MODULE_VER_SUBMINOR 2 #define DRV_MODULE_NAME "ena" @@ -100,7 +100,7 @@ /* Number of queues to check for missing queues per timer service */ #define ENA_MONITORED_TX_QUEUES 4 /* Max timeout packets before device reset */ -#define MAX_NUM_OF_TIMEOUTED_PACKETS 32 +#define MAX_NUM_OF_TIMEOUTED_PACKETS 128 #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) @@ -116,9 +116,9 @@ #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q)) /* ENA device should send keep alive msg every 1 sec. - * We wait for 3 sec just to be on the safe side. + * We wait for 6 sec just to be on the safe side. 
*/ -#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ) +#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ) #define ENA_MMIO_DISABLE_REG_READ BIT(0) @@ -241,6 +241,7 @@ struct ena_stats_dev { u64 interface_up; u64 interface_down; u64 admin_q_pause; + u64 rx_drops; }; enum ena_flags_t { diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 9595f1bc535b..7b5df562f30f 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) void __iomem *mmio = lp->mmio; struct sk_buff *skb,*new_skb; int min_pkt_len, status; - unsigned int intr0; int num_rx_pkt = 0; short pkt_len; #if AMD8111E_VLAN_TAG_USED short vtag; #endif - int rx_pkt_limit = budget; - unsigned long flags; - if (rx_pkt_limit <= 0) - goto rx_not_empty; + while (num_rx_pkt < budget) { + status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); + if (status & OWN_BIT) + break; - do{ - /* process receive packets until we use the quota. - * If we own the next entry, it's a new packet. Send it up. + /* There is a tricky error noted by John Murphy, + * <murf@perftech.com> to Russ Nelson: Even with + * full-sized * buffers it's possible for a + * jabber packet to use two buffers, with only + * the last correctly noting the error. */ - while(1) { - status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); - if (status & OWN_BIT) - break; - - /* There is a tricky error noted by John Murphy, - * <murf@perftech.com> to Russ Nelson: Even with - * full-sized * buffers it's possible for a - * jabber packet to use two buffers, with only - * the last correctly noting the error. - */ - if(status & ERR_BIT) { - /* resetting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - /* check for STP and ENP */ - if(!((status & STP_BIT) && (status & ENP_BIT))){ - /* resetting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; + if (status & ERR_BIT) { + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + /* check for STP and ENP */ + if (!((status & STP_BIT) && (status & ENP_BIT))){ + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; #if AMD8111E_VLAN_TAG_USED - vtag = status & TT_MASK; - /*MAC will strip vlan tag*/ - if (vtag != 0) - min_pkt_len =MIN_PKT_LEN - 4; + vtag = status & TT_MASK; + /* MAC will strip vlan tag */ + if (vtag != 0) + min_pkt_len = MIN_PKT_LEN - 4; else #endif - min_pkt_len =MIN_PKT_LEN; + min_pkt_len = MIN_PKT_LEN; - if (pkt_len < min_pkt_len) { - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } - if(--rx_pkt_limit < 0) - goto rx_not_empty; - new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); - if (!new_skb) { - /* if allocation fail, - * ignore that pkt and go to next one - */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } + if (pkt_len < min_pkt_len) { + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } + new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); + if (!new_skb) { + /* if allocation fail, + * ignore that pkt and go to next one + */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } - skb_reserve(new_skb, 2); - skb = 
lp->rx_skbuff[rx_index]; - pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], - lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len); - lp->rx_skbuff[rx_index] = new_skb; - lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, - new_skb->data, - lp->rx_buff_len-2, - PCI_DMA_FROMDEVICE); + skb_reserve(new_skb, 2); + skb = lp->rx_skbuff[rx_index]; + pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], + lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[rx_index] = new_skb; + lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, + new_skb->data, + lp->rx_buff_len-2, + PCI_DMA_FROMDEVICE); - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, dev); #if AMD8111E_VLAN_TAG_USED - if (vtag == TT_VLAN_TAGGED){ - u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - } -#endif - netif_receive_skb(skb); - /*COAL update rx coalescing parameters*/ - lp->coal_conf.rx_packets++; - lp->coal_conf.rx_bytes += pkt_len; - num_rx_pkt++; - - err_next_pkt: - lp->rx_ring[rx_index].buff_phy_addr - = cpu_to_le32(lp->rx_dma_addr[rx_index]); - lp->rx_ring[rx_index].buff_count = - cpu_to_le16(lp->rx_buff_len-2); - wmb(); - lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); - rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + if (vtag == TT_VLAN_TAGGED){ + u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - /* Check the interrupt status register for more packets in the - * mean time. Process them since we have not used up our quota. - */ - intr0 = readl(mmio + INT0); - /*Ack receive packets */ - writel(intr0 & RINT0,mmio + INT0); +#endif + napi_gro_receive(napi, skb); + /* COAL update rx coalescing parameters */ + lp->coal_conf.rx_packets++; + lp->coal_conf.rx_bytes += pkt_len; + num_rx_pkt++; + +err_next_pkt: + lp->rx_ring[rx_index].buff_phy_addr + = cpu_to_le32(lp->rx_dma_addr[rx_index]); + lp->rx_ring[rx_index].buff_count = + cpu_to_le16(lp->rx_buff_len-2); + wmb(); + lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); + rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + } - } while(intr0 & RINT0); + if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) { + unsigned long flags; - if (rx_pkt_limit > 0) { /* Receive descriptor is empty now */ spin_lock_irqsave(&lp->lock, flags); - __napi_complete(napi); writel(VAL0|RINTEN0, mmio + INTEN0); writel(VAL2 | RDMD0, mmio + CMD0); spin_unlock_irqrestore(&lp->lock, flags); } -rx_not_empty: return num_rx_pkt; } diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 76e5fc7adff5..6c98901f1b89 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -1276,18 +1276,6 @@ err_out: return ret; } -static void __exit dec_lance_remove(struct device *bdev) -{ - struct net_device *dev = dev_get_drvdata(bdev); - resource_size_t start, len; - - unregister_netdev(dev); - start = to_tc_dev(bdev)->resource.start; - len = to_tc_dev(bdev)->resource.end - start + 1; - release_mem_region(start, len); - free_netdev(dev); -} - /* Find all the lance cards on the system and initialize them */ static int __init dec_lance_platform_probe(void) { @@ -1320,7 +1308,7 @@ static void __exit dec_lance_platform_remove(void) #ifdef CONFIG_TC static int dec_lance_tc_probe(struct device *dev); -static int __exit dec_lance_tc_remove(struct device *dev); +static int dec_lance_tc_remove(struct 
device *dev); static const struct tc_device_id dec_lance_tc_table[] = { { "DEC ", "PMAD-AA " }, @@ -1334,7 +1322,7 @@ static struct tc_driver dec_lance_tc_driver = { .name = "declance", .bus = &tc_bus_type, .probe = dec_lance_tc_probe, - .remove = __exit_p(dec_lance_tc_remove), + .remove = dec_lance_tc_remove, }, }; @@ -1346,7 +1334,19 @@ static int dec_lance_tc_probe(struct device *dev) return status; } -static int __exit dec_lance_tc_remove(struct device *dev) +static void dec_lance_remove(struct device *bdev) +{ + struct net_device *dev = dev_get_drvdata(bdev); + resource_size_t start, len; + + unregister_netdev(dev); + start = to_tc_dev(bdev)->resource.start; + len = to_tc_dev(bdev)->resource.end - start + 1; + release_mem_region(start, len); + free_netdev(dev); +} + +static int dec_lance_tc_remove(struct device *dev) { put_device(dev); dec_lance_remove(dev); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 41e58cca8fee..86369d7c9a0f 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -291,7 +291,10 @@ struct pcnet32_private { int options; unsigned int shared_irq:1, /* shared irq possible */ dxsuflo:1, /* disable transmit stop on uflo */ - mii:1; /* mii port available */ + mii:1, /* mii port available */ + autoneg:1, /* autoneg enabled */ + port_tp:1, /* port set to TP */ + fdx:1; /* full duplex enabled */ struct net_device *next; struct mii_if_info mii_if; struct timer_list watchdog_timer; @@ -677,6 +680,52 @@ static void pcnet32_poll_controller(struct net_device *dev) } #endif +/* + * lp->lock must be held. + */ +static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, + int can_sleep) +{ + int csr5; + struct pcnet32_private *lp = netdev_priv(dev); + const struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + + /* really old chips have to be stopped. */ + if (lp->chip_version < PCNET32_79C970A) + return 0; + + /* set SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); + + /* poll waiting for bit to be set */ + ticks = 0; + while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { + spin_unlock_irqrestore(&lp->lock, *flags); + if (can_sleep) + msleep(1); + else + mdelay(1); + spin_lock_irqsave(&lp->lock, *flags); + ticks++; + if (ticks > 200) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Error getting into suspend!\n"); + return 0; + } + } + return 1; +} + +static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr) +{ + int csr5 = lp->a->read_csr(ioaddr, CSR5); + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND); +} + static int pcnet32_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { @@ -684,12 +733,29 @@ static int pcnet32_get_link_ksettings(struct net_device *dev, unsigned long flags; int r = -EOPNOTSUPP; + spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); + r = 0; + } else if (lp->chip_version == PCNET32_79C970A) { + if (lp->autoneg) { + cmd->base.autoneg = AUTONEG_ENABLE; + if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0) + cmd->base.port = PORT_AUI; + else + cmd->base.port = PORT_TP; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI; + } + cmd->base.duplex = lp->fdx ? 
DUPLEX_FULL : DUPLEX_HALF; + cmd->base.speed = SPEED_10; + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.supported, + SUPPORTED_TP | SUPPORTED_AUI); r = 0; } + spin_unlock_irqrestore(&lp->lock, flags); return r; } @@ -697,14 +763,46 @@ static int pcnet32_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct pcnet32_private *lp = netdev_priv(dev); + ulong ioaddr = dev->base_addr; unsigned long flags; int r = -EOPNOTSUPP; + int suspended, bcr2, bcr9, csr15; + spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); + } else if (lp->chip_version == PCNET32_79C970A) { + suspended = pcnet32_suspend(dev, &flags, 0); + if (!suspended) + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + + lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE; + bcr2 = lp->a->read_bcr(ioaddr, 2); + if (cmd->base.autoneg == AUTONEG_ENABLE) { + lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002); + } else { + lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002); + + lp->port_tp = cmd->base.port == PORT_TP; + csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180; + if (cmd->base.port == PORT_TP) + csr15 |= 0x0080; + lp->a->write_csr(ioaddr, CSR15, csr15); + lp->init_block->mode = cpu_to_le16(csr15); + + lp->fdx = cmd->base.duplex == DUPLEX_FULL; + bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003; + if (cmd->base.duplex == DUPLEX_FULL) + bcr9 |= 0x0003; + lp->a->write_bcr(ioaddr, 9, bcr9); + } + if (suspended) + pcnet32_clr_suspend(lp, ioaddr); + else if (netif_running(dev)) + pcnet32_restart(dev, CSR0_NORMAL); + r = 0; } + spin_unlock_irqrestore(&lp->lock, flags); return r; } @@ -732,7 +830,14 @@ static u32 pcnet32_get_link(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { r = mii_link_ok(&lp->mii_if); - } else if (lp->chip_version >= PCNET32_79C970A) { + } else if (lp->chip_version == PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + /* only read link if port is set to TP */ + if (!lp->autoneg && lp->port_tp) + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + else /* link always up for AUI port or port auto select */ + r = 1; + } else if (lp->chip_version > PCNET32_79C970A) { ulong ioaddr = dev->base_addr; /* card base I/O address */ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); } else { /* can not detect link on really old chips */ @@ -1070,45 +1175,6 @@ static int pcnet32_set_phys_id(struct net_device *dev, } /* - * lp->lock must be held. - */ -static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, - int can_sleep) -{ - int csr5; - struct pcnet32_private *lp = netdev_priv(dev); - const struct pcnet32_access *a = lp->a; - ulong ioaddr = dev->base_addr; - int ticks; - - /* really old chips have to be stopped. 
*/ - if (lp->chip_version < PCNET32_79C970A) - return 0; - - /* set SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = a->read_csr(ioaddr, CSR5); - a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); - - /* poll waiting for bit to be set */ - ticks = 0; - while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { - spin_unlock_irqrestore(&lp->lock, *flags); - if (can_sleep) - msleep(1); - else - mdelay(1); - spin_lock_irqsave(&lp->lock, *flags); - ticks++; - if (ticks > 200) { - netif_printk(lp, hw, KERN_DEBUG, dev, - "Error getting into suspend!\n"); - return 0; - } - } - return 1; -} - -/* * process one receive descriptor entry */ @@ -1350,13 +1416,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) pcnet32_restart(dev, CSR0_START); netif_wake_queue(dev); } - spin_unlock_irqrestore(&lp->lock, flags); - - if (work_done < budget) { - spin_lock_irqsave(&lp->lock, flags); - - __napi_complete(napi); + if (work_done < budget && napi_complete_done(napi, work_done)) { /* clear interrupt masks */ val = lp->a->read_csr(ioaddr, CSR3); val &= 0x00ff; @@ -1364,9 +1425,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) /* Set interrupt enable. */ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); - - spin_unlock_irqrestore(&lp->lock, flags); } + + spin_unlock_irqrestore(&lp->lock, flags); return work_done; } @@ -1430,13 +1491,8 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, } } - if (!(csr0 & CSR0_STOP)) { /* If not stopped */ - int csr5; - - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = a->read_csr(ioaddr, CSR5); - a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); - } + if (!(csr0 & CSR0_STOP)) /* If not stopped */ + pcnet32_clr_suspend(lp, ioaddr); spin_unlock_irqrestore(&lp->lock, flags); } @@ -1817,6 +1873,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->options = PCNET32_PORT_ASEL; else lp->options = options_mapping[options[cards_found]]; + /* force default port to TP on 79C970A so link detection can work */ + if (lp->chip_version == PCNET32_79C970A) + lp->options = PCNET32_PORT_10BT; lp->mii_if.dev = dev; lp->mii_if.mdio_read = mdio_read; lp->mii_if.mdio_write = mdio_write; @@ -2068,6 +2127,10 @@ static int pcnet32_open(struct net_device *dev) (u32) (lp->rx_ring_dma_addr), (u32) (lp->init_dma_addr)); + lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL); + lp->port_tp = !!(lp->options & PCNET32_PORT_10BT); + lp->fdx = !!(lp->options & PCNET32_PORT_FD); + /* set/reset autoselect bit */ val = lp->a->read_bcr(ioaddr, 2) & ~2; if (lp->options & PCNET32_PORT_ASEL) @@ -2680,10 +2743,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev) } if (suspended) { - int csr5; - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = lp->a->read_csr(ioaddr, CSR5); - lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + pcnet32_clr_suspend(lp, ioaddr); } else { lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); pcnet32_restart(dev, CSR0_NORMAL); @@ -2794,6 +2854,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) if (lp->mii) { curr_link = mii_link_ok(&lp->mii_if); + } else if (lp->chip_version == PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + /* only read link if port is set to TP */ + if (!lp->autoneg && lp->port_tp) + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + else /* link always up for AUI port or port auto select */ + curr_link = 1; } else { ulong ioaddr = dev->base_addr; /* card base I/O address */ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); diff --git 
a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index a7d16db5c4b2..937f37a5dcb2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1323,7 +1323,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, enum xgbe_mdio_mode mode) { - unsigned int reg_val = 0; + unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); switch (mode) { case XGBE_MDIO_MODE_CL22: diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 1c87cc204075..248f60d171a5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1131,12 +1131,12 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); + phy_if->phy_stop(pdata); + xgbe_free_irqs(pdata); xgbe_napi_disable(pdata, 1); - phy_if->phy_stop(pdata); - hw_if->exit(pdata); channel = pdata->channel; @@ -1761,8 +1761,8 @@ static void xgbe_tx_timeout(struct net_device *netdev) schedule_work(&pdata->restart_work); } -static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *s) +static void xgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *s) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; @@ -1788,8 +1788,6 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, s->tx_dropped = netdev->stats.tx_dropped; DBGPR("<--%s\n", __func__); - - return s; } static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index c2730f15bd8b..38392a520725 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -122,104 +122,40 @@ #include "xgbe.h" #include "xgbe-common.h" -static int xgbe_config_msi(struct xgbe_prv_data *pdata) +static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata) { - unsigned int msi_count; + unsigned int vector_count; unsigned int i, j; int ret; - msi_count = XGBE_MSIX_BASE_COUNT; - msi_count += max(pdata->rx_ring_count, - pdata->tx_ring_count); - msi_count = roundup_pow_of_two(msi_count); + vector_count = XGBE_MSI_BASE_COUNT; + vector_count += max(pdata->rx_ring_count, + pdata->tx_ring_count); - ret = pci_enable_msi_exact(pdata->pcidev, msi_count); + ret = pci_alloc_irq_vectors(pdata->pcidev, XGBE_MSI_MIN_COUNT, + vector_count, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (ret < 0) { - dev_info(pdata->dev, "MSI request for %u interrupts failed\n", - msi_count); - - ret = pci_enable_msi(pdata->pcidev); - if (ret < 0) { - dev_info(pdata->dev, "MSI enablement failed\n"); - return ret; - } - - msi_count = 1; - } - - pdata->irq_count = msi_count; - - pdata->dev_irq = pdata->pcidev->irq; - - if (msi_count > 1) { - pdata->ecc_irq = pdata->pcidev->irq + 1; - pdata->i2c_irq = pdata->pcidev->irq + 2; - pdata->an_irq = pdata->pcidev->irq + 3; - - for (i = XGBE_MSIX_BASE_COUNT, j = 0; - (i < msi_count) && (j < XGBE_MAX_DMA_CHANNELS); - i++, j++) - pdata->channel_irq[j] = pdata->pcidev->irq + i; - pdata->channel_irq_count = j; - - pdata->per_channel_irq = 1; - pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL; - } else { - pdata->ecc_irq = pdata->pcidev->irq; - pdata->i2c_irq = pdata->pcidev->irq; - pdata->an_irq = pdata->pcidev->irq; - } - - if 
(netif_msg_probe(pdata)) - dev_dbg(pdata->dev, "MSI interrupts enabled\n"); - - return 0; -} - -static int xgbe_config_msix(struct xgbe_prv_data *pdata) -{ - unsigned int msix_count; - unsigned int i, j; - int ret; - - msix_count = XGBE_MSIX_BASE_COUNT; - msix_count += max(pdata->rx_ring_count, - pdata->tx_ring_count); - - pdata->msix_entries = devm_kcalloc(pdata->dev, msix_count, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!pdata->msix_entries) - return -ENOMEM; - - for (i = 0; i < msix_count; i++) - pdata->msix_entries[i].entry = i; - - ret = pci_enable_msix_range(pdata->pcidev, pdata->msix_entries, - XGBE_MSIX_MIN_COUNT, msix_count); - if (ret < 0) { - dev_info(pdata->dev, "MSI-X enablement failed\n"); - devm_kfree(pdata->dev, pdata->msix_entries); - pdata->msix_entries = NULL; + dev_info(pdata->dev, "multi MSI/MSI-X enablement failed\n"); return ret; } pdata->irq_count = ret; - pdata->dev_irq = pdata->msix_entries[0].vector; - pdata->ecc_irq = pdata->msix_entries[1].vector; - pdata->i2c_irq = pdata->msix_entries[2].vector; - pdata->an_irq = pdata->msix_entries[3].vector; + pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0); + pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 1); + pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 2); + pdata->an_irq = pci_irq_vector(pdata->pcidev, 3); - for (i = XGBE_MSIX_BASE_COUNT, j = 0; i < ret; i++, j++) - pdata->channel_irq[j] = pdata->msix_entries[i].vector; + for (i = XGBE_MSI_BASE_COUNT, j = 0; i < ret; i++, j++) + pdata->channel_irq[j] = pci_irq_vector(pdata->pcidev, i); pdata->channel_irq_count = j; pdata->per_channel_irq = 1; pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL; if (netif_msg_probe(pdata)) - dev_dbg(pdata->dev, "MSI-X interrupts enabled\n"); + dev_dbg(pdata->dev, "multi %s interrupts enabled\n", + pdata->pcidev->msix_enabled ? "MSI-X" : "MSI"); return 0; } @@ -228,21 +164,28 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata) { int ret; - ret = xgbe_config_msix(pdata); + ret = xgbe_config_multi_msi(pdata); if (!ret) goto out; - ret = xgbe_config_msi(pdata); - if (!ret) - goto out; + ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1, + PCI_IRQ_LEGACY | PCI_IRQ_MSI); + if (ret < 0) { + dev_info(pdata->dev, "single IRQ enablement failed\n"); + return ret; + } pdata->irq_count = 1; - pdata->irq_shared = 1; + pdata->channel_irq_count = 1; + + pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0); + pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 0); + pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 0); + pdata->an_irq = pci_irq_vector(pdata->pcidev, 0); - pdata->dev_irq = pdata->pcidev->irq; - pdata->ecc_irq = pdata->pcidev->irq; - pdata->i2c_irq = pdata->pcidev->irq; - pdata->an_irq = pdata->pcidev->irq; + if (netif_msg_probe(pdata)) + dev_dbg(pdata->dev, "single %s interrupt enabled\n", + pdata->pcidev->msi_enabled ? 
"MSI" : "legacy"); out: if (netif_msg_probe(pdata)) { @@ -425,12 +368,15 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Configure the netdev resource */ ret = xgbe_config_netdev(pdata); if (ret) - goto err_pci_enable; + goto err_irq_vectors; netdev_notice(pdata->netdev, "net device enabled\n"); return 0; +err_irq_vectors: + pci_free_irq_vectors(pdata->pcidev); + err_pci_enable: xgbe_free_pdata(pdata); @@ -446,6 +392,8 @@ static void xgbe_pci_remove(struct pci_dev *pdev) xgbe_deconfig_netdev(pdata); + pci_free_irq_vectors(pdata->pcidev); + xgbe_free_pdata(pdata); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 9d8c953083b4..e707c49cc55a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -716,6 +716,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.advertising = pdata->phy.supported; + + return; } pdata->phy.advertising &= ~ADVERTISED_Autoneg; @@ -875,6 +877,16 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) !phy_data->sfp_phy_avail) return 0; + /* Set the proper MDIO mode for the PHY */ + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + netdev_err(pdata->netdev, + "mdio port/clause not compatible (%u/%u)\n", + phy_data->mdio_addr, phy_data->phydev_mode); + return ret; + } + /* Create and connect to the PHY device */ phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr, (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45)); @@ -2722,6 +2734,18 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) if (ret) return ret; + /* Set the proper MDIO mode for the re-driver */ + if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + XGBE_MDIO_MODE_CL22); + if (ret) { + netdev_err(pdata->netdev, + "redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return ret; + } + } + /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 00108815b55e..f9a24639f574 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -211,9 +211,9 @@ #define XGBE_MAC_PROP_OFFSET 0x1d000 #define XGBE_I2C_CTRL_OFFSET 0x1e000 -/* PCI MSIx support */ -#define XGBE_MSIX_BASE_COUNT 4 -#define XGBE_MSIX_MIN_COUNT (XGBE_MSIX_BASE_COUNT + 1) +/* PCI MSI/MSIx support */ +#define XGBE_MSI_BASE_COUNT 4 +#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1) /* PCI clock frequencies */ #define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */ @@ -982,14 +982,12 @@ struct xgbe_prv_data { unsigned int desc_ded_count; unsigned int desc_sec_count; - struct msix_entry *msix_entries; int dev_irq; int ecc_irq; int i2c_irq; int channel_irq[XGBE_MAX_DMA_CHANNELS]; unsigned int per_channel_irq; - unsigned int irq_shared; unsigned int irq_count; unsigned int channel_irq_count; unsigned int channel_irq_mode; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 523b8eff6d7b..b3568c453b14 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -293,36 +293,29 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, static int 
xgene_enet_setup_mss(struct net_device *ndev, u32 mss) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - bool mss_index_found = false; - int mss_index; + int mss_index = -EBUSY; int i; spin_lock(&pdata->mss_lock); /* Reuse the slot if MSS matches */ - for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) { + for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) { if (pdata->mss[i] == mss) { pdata->mss_refcnt[i]++; mss_index = i; - mss_index_found = true; } } /* Overwrite the slot with ref_count = 0 */ - for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) { + for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) { if (!pdata->mss_refcnt[i]) { pdata->mss_refcnt[i]++; pdata->mac_ops->set_mss(pdata, mss, i); pdata->mss[i] = mss; mss_index = i; - mss_index_found = true; } } - /* No slots with ref_count = 0 available, return busy */ - if (!mss_index_found) - mss_index = -EBUSY; - spin_unlock(&pdata->mss_lock); return mss_index; @@ -840,7 +833,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget) processed = xgene_enet_process_ring(ring, budget); if (processed != budget) { - napi_complete(napi); + napi_complete_done(napi, processed); enable_irq(ring->irq); } @@ -1453,7 +1446,7 @@ err: return ret; } -static struct rtnl_link_stats64 *xgene_enet_get_stats64( +static void xgene_enet_get_stats64( struct net_device *ndev, struct rtnl_link_stats64 *storage) { @@ -1462,7 +1455,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64( struct xgene_enet_desc_ring *ring; int i; - memset(stats, 0, sizeof(struct rtnl_link_stats64)); for (i = 0; i < pdata->txq_cnt; i++) { ring = pdata->tx_ring[i]; if (ring) { @@ -1484,8 +1476,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64( } } memcpy(storage, stats, sizeof(struct rtnl_link_stats64)); - - return storage; } static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) @@ -1759,6 +1749,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { + /* Abort if the clock is defined but couldn't be retrived. + * Always abort if the clock is missing on DT system as + * the driver can't cope with this case. + */ + if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) + return PTR_ERR(pdata->clk); /* Firmware may have set up the clock already. 
*/ dev_info(dev, "clocks have been setup already\n"); } @@ -1967,6 +1963,30 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) } } +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_enet_acpi_match[] = { + { "APMC0D05", XGENE_ENET1}, + { "APMC0D30", XGENE_ENET1}, + { "APMC0D31", XGENE_ENET1}, + { "APMC0D3F", XGENE_ENET1}, + { "APMC0D26", XGENE_ENET2}, + { "APMC0D25", XGENE_ENET2}, + { } +}; +MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); +#endif + +static const struct of_device_id xgene_enet_of_match[] = { + {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1}, + {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1}, + {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1}, + {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2}, + {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2}, + {}, +}; + +MODULE_DEVICE_TABLE(of, xgene_enet_of_match); + static int xgene_enet_probe(struct platform_device *pdev) { struct net_device *ndev; @@ -2113,32 +2133,6 @@ static void xgene_enet_shutdown(struct platform_device *pdev) xgene_enet_remove(pdev); } -#ifdef CONFIG_ACPI -static const struct acpi_device_id xgene_enet_acpi_match[] = { - { "APMC0D05", XGENE_ENET1}, - { "APMC0D30", XGENE_ENET1}, - { "APMC0D31", XGENE_ENET1}, - { "APMC0D3F", XGENE_ENET1}, - { "APMC0D26", XGENE_ENET2}, - { "APMC0D25", XGENE_ENET2}, - { } -}; -MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); -#endif - -#ifdef CONFIG_OF -static const struct of_device_id xgene_enet_of_match[] = { - {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1}, - {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1}, - {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1}, - {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2}, - {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2}, - {}, -}; - -MODULE_DEVICE_TABLE(of, xgene_enet_of_match); -#endif - static struct platform_driver xgene_enet_driver = { .driver = { .name = "xgene-enet", diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig new file mode 100644 index 000000000000..cdf78e069a39 --- /dev/null +++ b/drivers/net/ethernet/aquantia/Kconfig @@ -0,0 +1,24 @@ +# +# aQuantia device configuration +# + +config NET_VENDOR_AQUANTIA + bool "aQuantia devices" + default y + ---help--- + Set this to y if you have an Ethernet network cards that uses the aQuantia + AQC107/AQC108 chipset. + + This option does not build any drivers; it casues the aQuantia + drivers that can be built to appear in the list of Ethernet drivers. + + +if NET_VENDOR_AQUANTIA + +config AQTION + tristate "aQuantia AQtion(tm) Support" + depends on PCI && X86_64 + ---help--- + This enables the support for the aQuantia AQtion(tm) Ethernet card. + +endif # NET_VENDOR_AQUANTIA diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile new file mode 100644 index 000000000000..4f4897b689b2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the aQuantia device drivers. 
+# + +obj-$(CONFIG_AQTION) += atlantic/ diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile new file mode 100644 index 000000000000..e4ae696920ef --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -0,0 +1,42 @@ +################################################################################ +# +# aQuantia Ethernet Controller AQtion Linux Driver +# Copyright(c) 2014-2017 aQuantia Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see <http://www.gnu.org/licenses/>. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: <rdc-drv@aquantia.com> +# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA +# +################################################################################ + +# +# Makefile for the AQtion(tm) Ethernet driver +# + +obj-$(CONFIG_AQTION) += atlantic.o + +atlantic-objs := aq_main.o \ + aq_nic.o \ + aq_pci_func.o \ + aq_vec.o \ + aq_ring.o \ + aq_hw_utils.o \ + aq_ethtool.o \ + hw_atl/hw_atl_a0.o \ + hw_atl/hw_atl_b0.o \ + hw_atl/hw_atl_utils.o \ + hw_atl/hw_atl_llh.o diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h new file mode 100644 index 000000000000..5f99237a9d52 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -0,0 +1,77 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_cfg.h: Definition of configuration parameters and constants. 
*/ + +#ifndef AQ_CFG_H +#define AQ_CFG_H + +#define AQ_CFG_VECS_DEF 4U +#define AQ_CFG_TCS_DEF 1U + +#define AQ_CFG_TXDS_DEF 4096U +#define AQ_CFG_RXDS_DEF 1024U + +#define AQ_CFG_IS_POLLING_DEF 0U + +#define AQ_CFG_FORCE_LEGACY_INT 0U + +#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U +#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU +#define AQ_CFG_IRQ_MASK 0x1FFU + +#define AQ_CFG_VECS_MAX 8U +#define AQ_CFG_TCS_MAX 8U + +#define AQ_CFG_TX_FRAME_MAX (16U * 1024U) +#define AQ_CFG_RX_FRAME_MAX (4U * 1024U) + +/* LRO */ +#define AQ_CFG_IS_LRO_DEF 1U + +/* RSS */ +#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U +#define AQ_CFG_RSS_HASHKEY_SIZE 320U + +#define AQ_CFG_IS_RSS_DEF 1U +#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF +#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U + +#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U +#define AQ_CFG_PCI_FUNC_PORTS 2U + +#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) +#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) + +#define AQ_CFG_SKB_FRAGS_MAX 32U + +#define AQ_CFG_NAPI_WEIGHT 64U + +#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U + +/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ + +#define AQ_CFG_FC_MODE 3U + +#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */ + +#define AQ_CFG_IS_AUTONEG_DEF 1U +#define AQ_CFG_MTU_DEF 1514U + +#define AQ_CFG_LOCK_TRYS 100U + +#define AQ_CFG_DRV_AUTHOR "aQuantia" +#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver" +#define AQ_CFG_DRV_NAME "aquantia" +#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ + __stringify(NIC_MINOR_DRIVER_VERSION)"."\ + __stringify(NIC_BUILD_DRIVER_VERSION)"."\ + __stringify(NIC_REVISION_DRIVER_VERSION) + +#endif /* AQ_CFG_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h new file mode 100644 index 000000000000..9eb5e222a234 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -0,0 +1,23 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_common.h: Basic includes for all files in project. */ + +#ifndef AQ_COMMON_H +#define AQ_COMMON_H + +#include <linux/etherdevice.h> +#include <linux/pci.h> + +#include "ver.h" +#include "aq_nic.h" +#include "aq_cfg.h" +#include "aq_utils.h" + +#endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c new file mode 100644 index 000000000000..a761e91471df --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -0,0 +1,262 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ethtool.c: Definition of ethertool related functions. 
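AQ_CFG_DRV_VERSION in aq_cfg.h above is assembled entirely at compile time: __stringify() turns each NIC_*_DRIVER_VERSION macro into a string literal, and the adjacent literals are then concatenated with the "." separators. A standalone sketch of the same preprocessor technique follows; the version numbers are made up, since the real ones live in ver.h, which is not shown.

#include <stdio.h>

/* Two-level expansion, same shape as the kernel's __stringify(). */
#define STRINGIFY_1(x) #x
#define STRINGIFY(x)   STRINGIFY_1(x)

/* Hypothetical values standing in for the definitions in ver.h. */
#define NIC_MAJOR_DRIVER_VERSION    1
#define NIC_MINOR_DRIVER_VERSION    5
#define NIC_BUILD_DRIVER_VERSION    3
#define NIC_REVISION_DRIVER_VERSION 0

#define DRV_VERSION STRINGIFY(NIC_MAJOR_DRIVER_VERSION) "." \
                    STRINGIFY(NIC_MINOR_DRIVER_VERSION) "." \
                    STRINGIFY(NIC_BUILD_DRIVER_VERSION) "." \
                    STRINGIFY(NIC_REVISION_DRIVER_VERSION)

int main(void)
{
        /* Prints "1.5.3.0": the literals were glued together at compile time. */
        printf("%s\n", DRV_VERSION);
        return 0;
}

The two-level macro matters: a single-level #x would stringify the macro's name rather than its value.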
*/ + +#include "aq_ethtool.h" +#include "aq_nic.h" + +static void aq_ethtool_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *p) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + memset(p, 0, regs_count * sizeof(u32)); + aq_nic_get_regs(aq_nic, regs, p); +} + +static int aq_ethtool_get_regs_len(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + return regs_count * sizeof(u32); +} + +static u32 aq_ethtool_get_link(struct net_device *ndev) +{ + return ethtool_op_get_link(ndev); +} + +static int aq_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + aq_nic_get_link_ksettings(aq_nic, cmd); + cmd->base.speed = netif_carrier_ok(ndev) ? + aq_nic_get_link_speed(aq_nic) : 0U; + + return 0; +} + +static int +aq_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + return aq_nic_set_link_ksettings(aq_nic, cmd); +} + +/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */ +static const unsigned int aq_ethtool_stat_queue_lines = 5U; +static const unsigned int aq_ethtool_stat_queue_chars = + 5U * ETH_GSTRING_LEN; +static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { + "InPackets", + "InUCast", + "InMCast", + "InBCast", + "InErrors", + "OutPackets", + "OutUCast", + "OutMCast", + "OutBCast", + "InUCastOctects", + "OutUCastOctects", + "InMCastOctects", + "OutMCastOctects", + "InBCastOctects", + "OutBCastOctects", + "InOctects", + "OutOctects", + "InPacketsDma", + "OutPacketsDma", + "InOctetsDma", + "OutOctetsDma", + "InDroppedDma", + "Queue[0] InPackets", + "Queue[0] OutPackets", + "Queue[0] InJumboPackets", + "Queue[0] InLroPackets", + "Queue[0] InErrors", + "Queue[1] InPackets", + "Queue[1] OutPackets", + "Queue[1] InJumboPackets", + "Queue[1] InLroPackets", + "Queue[1] InErrors", + "Queue[2] InPackets", + "Queue[2] OutPackets", + "Queue[2] InJumboPackets", + "Queue[2] InLroPackets", + "Queue[2] InErrors", + "Queue[3] InPackets", + "Queue[3] OutPackets", + "Queue[3] InJumboPackets", + "Queue[3] InLroPackets", + "Queue[3] InErrors", + "Queue[4] InPackets", + "Queue[4] OutPackets", + "Queue[4] InJumboPackets", + "Queue[4] InLroPackets", + "Queue[4] InErrors", + "Queue[5] InPackets", + "Queue[5] OutPackets", + "Queue[5] InJumboPackets", + "Queue[5] InLroPackets", + "Queue[5] InErrors", + "Queue[6] InPackets", + "Queue[6] OutPackets", + "Queue[6] InJumboPackets", + "Queue[6] InLroPackets", + "Queue[6] InErrors", + "Queue[7] InPackets", + "Queue[7] OutPackets", + "Queue[7] InJumboPackets", + "Queue[7] InLroPackets", + "Queue[7] InErrors", +}; + +static void aq_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + +/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ + BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); + memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); + aq_nic_get_stats(aq_nic, data); +} + +static void aq_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); + u32 firmware_version = aq_nic_get_fw_version(aq_nic); + u32 regs_count = 
aq_nic_get_regs_count(aq_nic); + + strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver)); + strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u", firmware_version >> 24, + (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU); + + strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = regs_count; + drvinfo->eedump_len = 0; +} + +static void aq_ethtool_get_strings(struct net_device *ndev, + u32 stringset, u8 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + if (stringset == ETH_SS_STATS) + memcpy(data, *aq_ethtool_stat_names, + sizeof(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_chars); +} + +static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) +{ + int ret = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + switch (stringset) { + case ETH_SS_STATS: + ret = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_lines; + break; + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev) +{ + return AQ_CFG_RSS_INDIRECTION_TABLE_MAX; +} + +static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + return sizeof(cfg->aq_rss.hash_secret_key); +} + +static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + unsigned int i = 0U; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + if (indir) { + for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++) + indir[i] = cfg->aq_rss.indirection_table[i]; + } + if (key) + memcpy(key, cfg->aq_rss.hash_secret_key, + sizeof(cfg->aq_rss.hash_secret_key)); + return 0; +} + +static int aq_ethtool_get_rxnfc(struct net_device *ndev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = cfg->vecs; + break; + + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +const struct ethtool_ops aq_ethtool_ops = { + .get_link = aq_ethtool_get_link, + .get_regs_len = aq_ethtool_get_regs_len, + .get_regs = aq_ethtool_get_regs, + .get_drvinfo = aq_ethtool_get_drvinfo, + .get_strings = aq_ethtool_get_strings, + .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_rxfh_key_size = aq_ethtool_get_rss_key_size, + .get_rxfh = aq_ethtool_get_rss, + .get_rxnfc = aq_ethtool_get_rxnfc, + .get_sset_count = aq_ethtool_get_sset_count, + .get_ethtool_stats = aq_ethtool_stats, + .get_link_ksettings = aq_ethtool_get_link_ksettings, + .set_link_ksettings = aq_ethtool_set_link_ksettings, +}; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h new file mode 100644 index 000000000000..21c126eeb5eb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h @@ -0,0 +1,19 @@ +/* + * 
aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ethtool.h: Declaration of ethertool related functions. */ + +#ifndef AQ_ETHTOOL_H +#define AQ_ETHTOOL_H + +#include "aq_common.h" + +extern const struct ethtool_ops aq_ethtool_ops; + +#endif /* AQ_ETHTOOL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h new file mode 100644 index 000000000000..fce0fd3f23ff --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -0,0 +1,177 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific + * functions. + */ + +#ifndef AQ_HW_H +#define AQ_HW_H + +#include "aq_common.h" + +/* NIC H/W capabilities */ +struct aq_hw_caps_s { + u64 hw_features; + u64 link_speed_msk; + unsigned int hw_priv_flags; + u32 rxds; + u32 txds; + u32 txhwb_alignment; + u32 irq_mask; + u32 vecs; + u32 mtu; + u32 mac_regs_count; + u8 ports; + u8 msix_irqs; + u8 tcs; + u8 rxd_alignment; + u8 rxd_size; + u8 txd_alignment; + u8 txd_size; + u8 tx_rings; + u8 rx_rings; + bool flow_control; + bool is_64_dma; + u32 fw_ver_expected; +}; + +struct aq_hw_link_status_s { + unsigned int mbps; +}; + +#define AQ_HW_IRQ_INVALID 0U +#define AQ_HW_IRQ_LEGACY 1U +#define AQ_HW_IRQ_MSI 2U +#define AQ_HW_IRQ_MSIX 3U + +#define AQ_HW_POWER_STATE_D0 0U +#define AQ_HW_POWER_STATE_D3 3U + +#define AQ_HW_FLAG_STARTED 0x00000004U +#define AQ_HW_FLAG_STOPPING 0x00000008U +#define AQ_HW_FLAG_RESETTING 0x00000010U +#define AQ_HW_FLAG_CLOSING 0x00000020U +#define AQ_HW_LINK_DOWN 0x04000000U +#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U +#define AQ_HW_FLAG_ERR_HW 0x80000000U + +#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG) + +struct aq_hw_s { + struct aq_obj_s header; + struct aq_nic_cfg_s *aq_nic_cfg; + struct aq_pci_func_s *aq_pci_func; + void __iomem *mmio; + unsigned int not_ff_addr; + struct aq_hw_link_status_s aq_link_status; +}; + +struct aq_ring_s; +struct aq_ring_param_s; +struct aq_nic_cfg_s; +struct sk_buff; + +struct aq_hw_ops { + struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func, + unsigned int port, struct aq_hw_ops *ops); + + void (*destroy)(struct aq_hw_s *self); + + int (*get_hw_caps)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps); + + int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + unsigned int frags); + + int (*hw_ring_rx_receive)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + unsigned int sw_tail_old); + + int (*hw_ring_tx_head_update)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_get_mac_permanent)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac); + + int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); + + int (*hw_get_link_status)(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status); + + int (*hw_set_link_speed)(struct aq_hw_s 
*self, u32 speed); + + int (*hw_reset)(struct aq_hw_s *self); + + int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr); + + int (*hw_start)(struct aq_hw_s *self); + + int (*hw_stop)(struct aq_hw_s *self); + + int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_tx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_tx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_init)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_rx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask); + + int (*hw_packet_filter_set)(struct aq_hw_s *self, + unsigned int packet_filter); + + int (*hw_multicast_list_set)(struct aq_hw_s *self, + u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count); + + int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, + bool itr_enabled); + + int (*hw_rss_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_rss_hash_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_get_regs)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); + + int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, + unsigned int *p_count); + + int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); + + int (*hw_deinit)(struct aq_hw_s *self); + + int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); +}; + +#endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c new file mode 100644 index 000000000000..5f13465995f6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -0,0 +1,68 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.c: Definitions of helper functions used across + * hardware layer. 
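struct aq_hw_ops above is the driver's hardware abstraction layer: the common code in aq_nic.c only ever calls through these function pointers, and each silicon generation (hw_atl_a0 and hw_atl_b0 in the atlantic Makefile) supplies its own table. A cut-down userspace sketch of that dispatch pattern follows; the two-entry ops structure and the backend function names are invented for illustration.

#include <stdio.h>

/* Cut-down stand-in for struct aq_hw_ops: generic code sees only
 * function pointers, never the backend's internals. */
struct hw_ops {
        int (*hw_reset)(void);
        int (*hw_start)(void);
};

/* One hypothetical backend; a second generation would provide its own
 * table with the same signatures. */
static int a0_reset(void) { puts("a0: reset"); return 0; }
static int a0_start(void) { puts("a0: start"); return 0; }

static const struct hw_ops a0_ops = {
        .hw_reset = a0_reset,
        .hw_start = a0_start,
};

/* Generic bring-up path: drive whatever table was handed in and
 * propagate negative error codes, as aq_nic_init()/aq_nic_start() do. */
static int nic_bring_up(const struct hw_ops *ops)
{
        int err = ops->hw_reset();

        if (err < 0)
                return err;
        return ops->hw_start();
}

int main(void)
{
        return nic_bring_up(&a0_ops) < 0 ? 1 : 0;
}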
+ */ + +#include "aq_hw_utils.h" +#include "aq_hw.h" + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val) +{ + if (msk ^ ~0) { + u32 reg_old, reg_new; + + reg_old = aq_hw_read_reg(aq_hw, addr); + reg_new = (reg_old & (~msk)) | (val << shift); + + if (reg_old != reg_new) + aq_hw_write_reg(aq_hw, addr, reg_new); + } else { + aq_hw_write_reg(aq_hw, addr, val); + } +} + +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift) +{ + return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift); +} + +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg) +{ + u32 value = readl(hw->mmio + reg); + + if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr)) + aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG); + + return value; +} + +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value) +{ + writel(value, hw->mmio + reg); +} + +int aq_hw_err_from_flags(struct aq_hw_s *hw) +{ + int err = 0; + + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) { + err = -EIO; + goto err_exit; + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h new file mode 100644 index 000000000000..03b72ddbffb9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -0,0 +1,47 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.h: Declaration of helper functions used across hardware + * layer. + */ + +#ifndef AQ_HW_UTILS_H +#define AQ_HW_UTILS_H + +#include "aq_common.h" + +#ifndef HIDWORD +#define LODWORD(_qw) ((u32)(_qw)) +#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff)) +#endif + +#define AQ_HW_SLEEP(_US_) mdelay(_US_) + +#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \ +do { \ + unsigned int AQ_HW_WAIT_FOR_i; \ + for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\ + --AQ_HW_WAIT_FOR_i) {\ + udelay(_US_); \ + } \ + if (!AQ_HW_WAIT_FOR_i) {\ + err = -ETIME; \ + } \ +} while (0) + +struct aq_hw_s; + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val); +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift); +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); +int aq_hw_err_from_flags(struct aq_hw_s *hw); + +#endif /* AQ_HW_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c new file mode 100644 index 000000000000..dad63623be6a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -0,0 +1,239 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.c: Main file for aQuantia Linux driver. 
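aq_hw_write_reg_bit() above is a standard masked read-modify-write: clear the field with ~msk, OR in val shifted into position, skip the MMIO write when nothing changed, and write the register directly when the mask covers all 32 bits. The field update itself is plain bit arithmetic and can be checked in isolation; the register value and field below are hypothetical.

#include <stdio.h>
#include <stdint.h>

/* Same update rule as aq_hw_write_reg_bit(), minus the MMIO access:
 * the field selected by 'msk' is replaced by 'val << shift'. */
static uint32_t write_reg_bit(uint32_t reg_old, uint32_t msk,
                              uint32_t shift, uint32_t val)
{
        return (reg_old & ~msk) | (val << shift);
}

int main(void)
{
        /* Replace the 4-bit field at bits 15:12 of a sample register. */
        uint32_t reg_old = 0xDEADBEEF;
        uint32_t reg_new = write_reg_bit(reg_old, 0x0000F000, 12, 0x5);

        /* 0xDEADBEEF -> 0xDEAD5EEF: only bits 15:12 changed. */
        printf("0x%08X -> 0x%08X\n", reg_old, reg_new);
        return 0;
}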
*/ + +#include "aq_main.h" +#include "aq_nic.h" +#include "aq_pci_func.h" +#include "aq_ethtool.h" +#include "hw_atl/hw_atl_a0.h" +#include "hw_atl/hw_atl_b0.h" + +#include <linux/netdevice.h> +#include <linux/module.h> + +static const struct pci_device_id aq_pci_tbl[] = { + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), }, + {} +}; + +MODULE_DEVICE_TABLE(pci, aq_pci_tbl); + +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(AQ_CFG_DRV_VERSION); +MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); +MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); + +static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev) +{ + struct aq_hw_ops *ops = NULL; + + ops = hw_atl_a0_get_ops_by_id(pdev); + if (!ops) + ops = hw_atl_b0_get_ops_by_id(pdev); + + return ops; +} + +static int aq_ndev_open(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = NULL; + int err = 0; + + aq_nic = aq_nic_alloc_hot(ndev); + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + err = aq_nic_init(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) + aq_nic_deinit(aq_nic); + return err; +} + +static int aq_ndev_close(struct net_device *ndev) +{ + int err = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + err = aq_nic_stop(aq_nic); + if (err < 0) + goto err_exit; + aq_nic_deinit(aq_nic); + aq_nic_free_hot_resources(aq_nic); + +err_exit: + return err; +} + +static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + return aq_nic_xmit(aq_nic, skb); +} + +static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); + + if (err < 0) + goto err_exit; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + +err_exit: + return err; +} + +static int aq_ndev_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); + bool is_lro = false; + + if (aq_cfg->hw_features & NETIF_F_LRO) { + is_lro = features & NETIF_F_LRO; + + if (aq_cfg->is_lro != is_lro) { + aq_cfg->is_lro = is_lro; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + } + } + + return 0; +} + +static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = eth_mac_addr(ndev, addr); + if (err < 0) + goto err_exit; + err = aq_nic_set_mac(aq_nic, ndev); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static void aq_ndev_set_multicast_settings(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = aq_nic_set_packet_filter(aq_nic, ndev->flags); + if (err < 0) + goto err_exit; + + if (netdev_mc_count(ndev)) { + err = aq_nic_set_multicast_list(aq_nic, ndev); + if (err < 0) + goto err_exit; + } + +err_exit:; +} + +static const struct net_device_ops aq_ndev_ops = { + .ndo_open = aq_ndev_open, + .ndo_stop = aq_ndev_close, + .ndo_start_xmit = aq_ndev_start_xmit, + .ndo_set_rx_mode = aq_ndev_set_multicast_settings, + .ndo_change_mtu = aq_ndev_change_mtu, + .ndo_set_mac_address = aq_ndev_set_mac_address, + .ndo_set_features 
= aq_ndev_set_features +}; + +static int aq_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct aq_hw_ops *aq_hw_ops = NULL; + struct aq_pci_func_s *aq_pci_func = NULL; + int err = 0; + + err = pci_enable_device(pdev); + if (err < 0) + goto err_exit; + aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev); + aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev, + &aq_ndev_ops, &aq_ethtool_ops); + if (!aq_pci_func) { + err = -ENOMEM; + goto err_exit; + } + err = aq_pci_func_init(aq_pci_func); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) { + if (aq_pci_func) + aq_pci_func_free(aq_pci_func); + } + return err; +} + +static void aq_pci_remove(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + aq_pci_func_deinit(aq_pci_func); + aq_pci_func_free(aq_pci_func); +} + +static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static int aq_pci_resume(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + pm_message_t pm_msg = PMSG_RESTORE; + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static struct pci_driver aq_pci_ops = { + .name = AQ_CFG_DRV_NAME, + .id_table = aq_pci_tbl, + .probe = aq_pci_probe, + .remove = aq_pci_remove, + .suspend = aq_pci_suspend, + .resume = aq_pci_resume, +}; + +module_pci_driver(aq_pci_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h new file mode 100644 index 000000000000..9748e7e575e0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -0,0 +1,17 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.h: Main file for aQuantia Linux driver. */ + +#ifndef AQ_MAIN_H +#define AQ_MAIN_H + +#include "aq_common.h" + +#endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c new file mode 100644 index 000000000000..ee78444bfb88 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -0,0 +1,990 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.c: Definition of common code for NIC. 
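aq_ndev_change_mtu() in aq_main.c above keeps the driver's internal MTU frame-inclusive: the ndo handler adds ETH_HLEN before calling aq_nic_set_mtu(), and aq_nic_ndev_init() later subtracts ETH_HLEN again when deriving ndev->mtu from AQ_CFG_MTU_DEF (1514, i.e. a 1500-byte payload). The sketch below shows just that bookkeeping; the bound check against the hardware capability and the close/reopen of a running interface are omitted.

#include <stdio.h>

#define ETH_HLEN       14      /* Ethernet header, as in <linux/if_ether.h> */
#define AQ_CFG_MTU_DEF 1514U   /* driver default: payload plus Ethernet header */

/* Internal configuration stores the header-inclusive size... */
static unsigned int cfg_mtu = AQ_CFG_MTU_DEF;

/* ...so the change-MTU path adds ETH_HLEN on the way in... */
static void change_mtu(unsigned int new_ndev_mtu)
{
        cfg_mtu = new_ndev_mtu + ETH_HLEN;
}

/* ...and the netdev-facing value subtracts it on the way out. */
static unsigned int ndev_mtu(void)
{
        return cfg_mtu - ETH_HLEN;
}

int main(void)
{
        printf("default ndev mtu: %u\n", ndev_mtu());        /* 1500 */
        change_mtu(9000);                                     /* jumbo request */
        printf("cfg mtu: %u, ndev mtu: %u\n", cfg_mtu, ndev_mtu()); /* 9014, 9000 */
        return 0;
}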
*/ + +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include "aq_pci_func.h" +#include "aq_nic_internal.h" + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/timer.h> +#include <linux/cpu.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <net/ip.h> + +static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + struct aq_rss_parameters *rss_params = &cfg->aq_rss; + int i = 0; + + static u8 rss_key[40] = { + 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, + 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, + 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, + 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70, + 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c + }; + + rss_params->hash_secret_key_size = sizeof(rss_key); + memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key)); + rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX; + + for (i = rss_params->indirection_table_size; i--;) + rss_params->indirection_table[i] = i & (num_rss_queues - 1); +} + +/* Fills aq_nic_cfg with valid defaults */ +static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + cfg->aq_hw_caps = &self->aq_hw_caps; + + cfg->vecs = AQ_CFG_VECS_DEF; + cfg->tcs = AQ_CFG_TCS_DEF; + + cfg->rxds = AQ_CFG_RXDS_DEF; + cfg->txds = AQ_CFG_TXDS_DEF; + + cfg->is_polling = AQ_CFG_IS_POLLING_DEF; + + cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; + cfg->itr = cfg->is_interrupt_moderation ? + AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; + + cfg->is_rss = AQ_CFG_IS_RSS_DEF; + cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; + cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; + cfg->flow_control = AQ_CFG_FC_MODE; + + cfg->mtu = AQ_CFG_MTU_DEF; + cfg->link_speed_msk = AQ_CFG_SPEED_MSK; + cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF; + + cfg->is_lro = AQ_CFG_IS_LRO_DEF; + + cfg->vlan_id = 0U; + + aq_nic_rss_init(self, cfg->num_rss_queues); +} + +/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ +int aq_nic_cfg_start(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + /*descriptors */ + cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds); + cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds); + + /*rss rings */ + cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs); + cfg->vecs = min(cfg->vecs, num_online_cpus()); + /* cfg->vecs should be power of 2 for RSS */ + if (cfg->vecs >= 8U) + cfg->vecs = 8U; + else if (cfg->vecs >= 4U) + cfg->vecs = 4U; + else if (cfg->vecs >= 2U) + cfg->vecs = 2U; + else + cfg->vecs = 1U; + + cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); + + if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || + (self->aq_hw_caps.vecs == 1U) || + (cfg->vecs == 1U)) { + cfg->is_rss = 0U; + cfg->vecs = 1U; + } + + cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk; + cfg->hw_features = self->aq_hw_caps.hw_features; + return 0; +} + +static void aq_nic_service_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct net_device *ndev = aq_nic_get_ndev(self); + int err = 0; + unsigned int i = 0U; + struct aq_hw_link_status_s link_status; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) + goto err_exit; + + err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); + if (err < 0) + goto err_exit; + + 
self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + + if (memcmp(&link_status, &self->link_status, sizeof(link_status))) { + if (link_status.mbps) { + aq_utils_obj_set(&self->header.flags, + AQ_NIC_FLAG_STARTED); + aq_utils_obj_clear(&self->header.flags, + AQ_NIC_LINK_DOWN); + netif_carrier_on(self->ndev); + } else { + netif_carrier_off(self->ndev); + aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); + } + + self->link_status = link_status; + } + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); + } + + ndev->stats.rx_packets = stats_rx.packets; + ndev->stats.rx_bytes = stats_rx.bytes; + ndev->stats.rx_errors = stats_rx.errors; + ndev->stats.tx_packets = stats_tx.packets; + ndev->stats.tx_bytes = stats_tx.bytes; + ndev->stats.tx_errors = stats_tx.errors; + +err_exit: + mod_timer(&self->service_timer, + jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); +} + +static void aq_nic_polling_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_isr(i, (void *)aq_vec); + + mod_timer(&self->polling_timer, jiffies + + AQ_CFG_POLLING_TIMER_INTERVAL); +} + +static struct net_device *aq_nic_ndev_alloc(void) +{ + return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); +} + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops) +{ + struct net_device *ndev = NULL; + struct aq_nic_s *self = NULL; + int err = 0; + + ndev = aq_nic_ndev_alloc(); + if (!ndev) { + err = -ENOMEM; + goto err_exit; + } + + self = netdev_priv(ndev); + + ndev->netdev_ops = ndev_ops; + ndev->ethtool_ops = et_ops; + + SET_NETDEV_DEV(ndev, dev); + + ndev->if_port = port; + ndev->min_mtu = ETH_MIN_MTU; + self->ndev = ndev; + + self->aq_pci_func = aq_pci_func; + + self->aq_hw_ops = *aq_hw_ops; + self->port = (u8)port; + + self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, + &self->aq_hw_ops); + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + aq_nic_cfg_init_defaults(self); + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +int aq_nic_ndev_register(struct aq_nic_s *self) +{ + int err = 0; + unsigned int i = 0U; + + if (!self->ndev) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw, + self->aq_nic_cfg.aq_hw_caps, + self->ndev->dev_addr); + if (err < 0) + goto err_exit; + +#if defined(AQ_CFG_MAC_ADDR_PERMANENT) + { + static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT; + + ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); + } +#endif + + netif_carrier_off(self->ndev); + + for (i = AQ_CFG_VECS_MAX; i--;) + aq_nic_ndev_queue_stop(self, i); + + err = register_netdev(self->ndev); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +int aq_nic_ndev_init(struct aq_nic_s *self) +{ + struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps; + struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg; + + self->ndev->hw_features |= 
aq_hw_caps->hw_features; + self->ndev->features = aq_hw_caps->hw_features; + self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; + self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; + + return 0; +} + +void aq_nic_ndev_free(struct aq_nic_s *self) +{ + if (!self->ndev) + goto err_exit; + + if (self->ndev->reg_state == NETREG_REGISTERED) + unregister_netdev(self->ndev); + + if (self->aq_hw) + self->aq_hw_ops.destroy(self->aq_hw); + + free_netdev(self->ndev); + +err_exit:; +} + +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) +{ + struct aq_nic_s *self = NULL; + int err = 0; + + if (!ndev) { + err = -EINVAL; + goto err_exit; + } + self = netdev_priv(ndev); + + if (!self) { + err = -EINVAL; + goto err_exit; + } + if (netif_running(ndev)) { + unsigned int i; + + for (i = AQ_CFG_VECS_MAX; i--;) + netif_stop_subqueue(ndev, i); + } + + for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; + self->aq_vecs++) { + self->aq_vec[self->aq_vecs] = + aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg); + if (!self->aq_vec[self->aq_vecs]) { + err = -ENOMEM; + goto err_exit; + } + } + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring) +{ + self->aq_ring_tx[idx] = ring; +} + +struct device *aq_nic_get_dev(struct aq_nic_s *self) +{ + return self->ndev->dev.parent; +} + +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self) +{ + return self->ndev; +} + +int aq_nic_init(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + self->power_state = AQ_HW_POWER_STATE_D0; + err = self->aq_hw_ops.hw_reset(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg, + aq_nic_get_ndev(self)->dev_addr); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw); + +err_exit: + return err; +} + +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) +{ + netif_start_subqueue(self->ndev, idx); +} + +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) +{ + netif_stop_subqueue(self->ndev, idx); +} + +int aq_nic_start(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, + self->packet_filter); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + err = aq_vec_start(aq_vec); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_start(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + if (err < 0) + goto err_exit; + setup_timer(&self->service_timer, &aq_nic_service_timer_cb, + (unsigned long)self); + mod_timer(&self->service_timer, jiffies + + AQ_CFG_SERVICE_TIMER_INTERVAL); + + if (self->aq_nic_cfg.is_polling) { + setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb, + (unsigned long)self); + mod_timer(&self->polling_timer, jiffies + + AQ_CFG_POLLING_TIMER_INTERVAL); + } else { + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + err = 
aq_pci_func_alloc_irq(self->aq_pci_func, i, + self->ndev->name, aq_vec, + aq_vec_get_affinity_mask(aq_vec)); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_irq_enable(self->aq_hw, + AQ_CFG_IRQ_MASK); + if (err < 0) + goto err_exit; + } + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_start(self, i); + + err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + + err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static unsigned int aq_nic_map_skb(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_s *ring) +{ + unsigned int ret = 0U; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int frag_count = 0U; + unsigned int dx = ring->sw_tail; + struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; + + if (unlikely(skb_is_gso(skb))) { + dx_buff->flags = 0U; + dx_buff->len_pkt = skb->len; + dx_buff->len_l2 = ETH_HLEN; + dx_buff->len_l3 = ip_hdrlen(skb); + dx_buff->len_l4 = tcp_hdrlen(skb); + dx_buff->mss = skb_shinfo(skb)->gso_size; + dx_buff->is_txc = 1U; + + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + ++ret; + } + + dx_buff->flags = 0U; + dx_buff->len = skb_headlen(skb); + dx_buff->pa = dma_map_single(aq_nic_get_dev(self), + skb->data, + dx_buff->len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) + goto exit; + + dx_buff->len_pkt = skb->len; + dx_buff->is_sop = 1U; + dx_buff->is_mapped = 1U; + ++ret; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? + 1U : 0U; + dx_buff->is_tcp_cso = + (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; + dx_buff->is_udp_cso = + (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 
1U : 0U; + } + + for (; nr_frags--; ++frag_count) { + unsigned int frag_len = 0U; + dma_addr_t frag_pa; + skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; + + frag_len = skb_frag_size(frag); + frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, + frag_len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) + goto mapping_error; + + while (frag_len > AQ_CFG_TX_FRAME_MAX) { + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + + dx_buff->flags = 0U; + dx_buff->len = AQ_CFG_TX_FRAME_MAX; + dx_buff->pa = frag_pa; + dx_buff->is_mapped = 1U; + + frag_len -= AQ_CFG_TX_FRAME_MAX; + frag_pa += AQ_CFG_TX_FRAME_MAX; + ++ret; + } + + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + + dx_buff->flags = 0U; + dx_buff->len = frag_len; + dx_buff->pa = frag_pa; + dx_buff->is_mapped = 1U; + ++ret; + } + + dx_buff->is_eop = 1U; + dx_buff->skb = skb; + goto exit; + +mapping_error: + for (dx = ring->sw_tail; + ret > 0; + --ret, dx = aq_ring_next_dx(ring, dx)) { + dx_buff = &ring->buff_ring[dx]; + + if (!dx_buff->is_txc && dx_buff->pa) { + if (unlikely(dx_buff->is_sop)) { + dma_unmap_single(aq_nic_get_dev(self), + dx_buff->pa, + dx_buff->len, + DMA_TO_DEVICE); + } else { + dma_unmap_page(aq_nic_get_dev(self), + dx_buff->pa, + dx_buff->len, + DMA_TO_DEVICE); + } + } + } + +exit: + return ret; +} + +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) +__releases(&ring->lock) +__acquires(&ring->lock) +{ + struct aq_ring_s *ring = NULL; + unsigned int frags = 0U; + unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; + unsigned int tc = 0U; + unsigned int trys = AQ_CFG_LOCK_TRYS; + int err = NETDEV_TX_OK; + bool is_nic_in_bad_state; + + frags = skb_shinfo(skb)->nr_frags + 1; + + ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; + + if (frags > AQ_CFG_SKB_FRAGS_MAX) { + dev_kfree_skb_any(skb); + goto err_exit; + } + + is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, + AQ_NIC_FLAGS_IS_NOT_TX_READY) || + (aq_ring_avail_dx(ring) < + AQ_CFG_SKB_FRAGS_MAX); + + if (is_nic_in_bad_state) { + aq_nic_ndev_queue_stop(self, ring->idx); + err = NETDEV_TX_BUSY; + goto err_exit; + } + + do { + if (spin_trylock(&ring->header.lock)) { + frags = aq_nic_map_skb(self, skb, ring); + + if (likely(frags)) { + err = self->aq_hw_ops.hw_ring_tx_xmit( + self->aq_hw, + ring, frags); + if (err >= 0) { + if (aq_ring_avail_dx(ring) < + AQ_CFG_SKB_FRAGS_MAX + 1) + aq_nic_ndev_queue_stop( + self, + ring->idx); + + ++ring->stats.tx.packets; + ring->stats.tx.bytes += skb->len; + } + } else { + err = NETDEV_TX_BUSY; + } + + spin_unlock(&ring->header.lock); + break; + } + } while (--trys); + + if (!trys) { + err = NETDEV_TX_BUSY; + goto err_exit; + } + +err_exit: + return err; +} + +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) +{ + int err = 0; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags); + if (err < 0) + goto err_exit; + + self->packet_filter = flags; + +err_exit: + return err; +} + +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) +{ + struct netdev_hw_addr *ha = NULL; + unsigned int i = 0U; + + self->mc_list.count = 0U; + + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(self->mc_list.ar[i++], ha->addr); + ++self->mc_list.count; + } + + return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); +} + +int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) +{ + int err = 0; + + if (new_mtu > 
self->aq_hw_caps.mtu) { + err = -EINVAL; + goto err_exit; + } + self->aq_nic_cfg.mtu = new_mtu; + +err_exit: + return err; +} + +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) +{ + return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr); +} + +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self) +{ + return self->link_status.mbps; +} + +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) +{ + u32 *regs_buff = p; + int err = 0; + + regs->version = 1; + + err = self->aq_hw_ops.hw_get_regs(self->aq_hw, + &self->aq_hw_caps, regs_buff); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +int aq_nic_get_regs_count(struct aq_nic_s *self) +{ + return self->aq_hw_caps.mac_regs_count; +} + +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + unsigned int count = 0U; + int err = 0; + + err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); + if (err < 0) + goto err_exit; + + data += count; + count = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + data += count; + aq_vec_get_sw_stats(aq_vec, data, &count); + } + +err_exit:; + (void)err; +} + +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd) +{ + cmd->base.port = PORT_TP; + /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 5000baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 2500baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + + if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + + if (self->aq_hw_caps.flow_control) + ethtool_link_ksettings_add_link_mode(cmd, supported, + Pause); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + if (self->aq_nic_cfg.is_autoneg) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 5000baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 2500baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + + if (self->aq_nic_cfg.flow_control) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Pause); + + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); +} + +int aq_nic_set_link_ksettings(struct aq_nic_s 
*self, + const struct ethtool_link_ksettings *cmd) +{ + u32 speed = 0U; + u32 rate = 0U; + int err = 0; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + rate = self->aq_hw_caps.link_speed_msk; + self->aq_nic_cfg.is_autoneg = true; + } else { + speed = cmd->base.speed; + + switch (speed) { + case SPEED_100: + rate = AQ_NIC_RATE_100M; + break; + + case SPEED_1000: + rate = AQ_NIC_RATE_1G; + break; + + case SPEED_2500: + rate = AQ_NIC_RATE_2GS; + break; + + case SPEED_5000: + rate = AQ_NIC_RATE_5G; + break; + + case SPEED_10000: + rate = AQ_NIC_RATE_10G; + break; + + default: + err = -1; + goto err_exit; + break; + } + if (!(self->aq_hw_caps.link_speed_msk & rate)) { + err = -1; + goto err_exit; + } + + self->aq_nic_cfg.is_autoneg = false; + } + + err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate); + if (err < 0) + goto err_exit; + + self->aq_nic_cfg.link_speed_msk = rate; + +err_exit: + return err; +} + +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self) +{ + return &self->aq_nic_cfg; +} + +u32 aq_nic_get_fw_version(struct aq_nic_s *self) +{ + u32 fw_version = 0U; + + self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version); + + return fw_version; +} + +int aq_nic_stop(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_stop(self, i); + + del_timer_sync(&self->service_timer); + + self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); + + if (self->aq_nic_cfg.is_polling) + del_timer_sync(&self->polling_timer); + else + aq_pci_func_free_irqs(self->aq_pci_func); + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_stop(aq_vec); + + return self->aq_hw_ops.hw_stop(self->aq_hw); +} + +void aq_nic_deinit(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_deinit(aq_vec); + + if (self->power_state == AQ_HW_POWER_STATE_D0) { + (void)self->aq_hw_ops.hw_deinit(self->aq_hw); + } else { + (void)self->aq_hw_ops.hw_set_power(self->aq_hw, + self->power_state); + } + +err_exit:; +} + +void aq_nic_free_hot_resources(struct aq_nic_s *self) +{ + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_free(self->aq_vec[i]); + } + +err_exit:; +} + +int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) +{ + int err = 0; + + if (!netif_running(self->ndev)) { + err = 0; + goto out; + } + rtnl_lock(); + if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { + self->power_state = AQ_HW_POWER_STATE_D3; + netif_device_detach(self->ndev); + netif_tx_stop_all_queues(self->ndev); + + err = aq_nic_stop(self); + if (err < 0) + goto err_exit; + + aq_nic_deinit(self); + } else { + err = aq_nic_init(self); + if (err < 0) + goto err_exit; + + err = aq_nic_start(self); + if (err < 0) + goto err_exit; + + netif_device_attach(self->ndev); + netif_tx_start_all_queues(self->ndev); + } + +err_exit: + rtnl_unlock(); +out: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h new file mode 100644 index 000000000000..7fc2a5ecb2b7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -0,0 +1,110 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 
2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.h: Declaration of common code for NIC. */ + +#ifndef AQ_NIC_H +#define AQ_NIC_H + +#include "aq_common.h" +#include "aq_rss.h" + +struct aq_ring_s; +struct aq_pci_func_s; +struct aq_hw_ops; + +#define AQ_NIC_FC_OFF 0U +#define AQ_NIC_FC_TX 1U +#define AQ_NIC_FC_RX 2U +#define AQ_NIC_FC_FULL 3U +#define AQ_NIC_FC_AUTO 4U + +#define AQ_NIC_RATE_10G BIT(0) +#define AQ_NIC_RATE_5G BIT(1) +#define AQ_NIC_RATE_5GSR BIT(2) +#define AQ_NIC_RATE_2GS BIT(3) +#define AQ_NIC_RATE_1G BIT(4) +#define AQ_NIC_RATE_100M BIT(5) + +struct aq_nic_cfg_s { + struct aq_hw_caps_s *aq_hw_caps; + u64 hw_features; + u32 rxds; /* rx ring size, descriptors # */ + u32 txds; /* tx ring size, descriptors # */ + u32 vecs; /* vecs==allocated irqs */ + u32 irq_type; + u32 itr; + u32 num_rss_queues; + u32 mtu; + u32 ucp_0x364; + u32 flow_control; + u32 link_speed_msk; + u32 vlan_id; + u16 is_mc_list_enabled; + u16 mc_list_count; + bool is_autoneg; + bool is_interrupt_moderation; + bool is_polling; + bool is_rss; + bool is_lro; + u8 tcs; + struct aq_rss_parameters aq_rss; +}; + +#define AQ_NIC_FLAG_STARTED 0x00000004U +#define AQ_NIC_FLAG_STOPPING 0x00000008U +#define AQ_NIC_FLAG_RESETTING 0x00000010U +#define AQ_NIC_FLAG_CLOSING 0x00000020U +#define AQ_NIC_LINK_DOWN 0x04000000U +#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U +#define AQ_NIC_FLAG_ERR_HW 0x80000000U + +#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ + ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops); +int aq_nic_ndev_init(struct aq_nic_s *self); +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev); +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring); +struct device *aq_nic_get_dev(struct aq_nic_s *self); +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); +int aq_nic_init(struct aq_nic_s *self); +int aq_nic_cfg_start(struct aq_nic_s *self); +int aq_nic_ndev_register(struct aq_nic_s *self); +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx); +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx); +void aq_nic_ndev_free(struct aq_nic_s *self); +int aq_nic_start(struct aq_nic_s *self); +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p); +int aq_nic_get_regs_count(struct aq_nic_s *self); +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data); +int aq_nic_stop(struct aq_nic_s *self); +void aq_nic_deinit(struct aq_nic_s *self); +void aq_nic_free_hot_resources(struct aq_nic_s *self); +int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu); +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev); +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags); +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev); +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self); +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd); +int aq_nic_set_link_ksettings(struct aq_nic_s *self, + const struct 
ethtool_link_ksettings *cmd); +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); +u32 aq_nic_get_fw_version(struct aq_nic_s *self); +int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); + +#endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h new file mode 100644 index 000000000000..e7d2711dc165 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h @@ -0,0 +1,45 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic_internal.h: Definition of private object structure. */ + +#ifndef AQ_NIC_INTERNAL_H +#define AQ_NIC_INTERNAL_H + +struct aq_nic_s { + struct aq_obj_s header; + struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; + struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; + struct aq_hw_s *aq_hw; + struct net_device *ndev; + struct aq_pci_func_s *aq_pci_func; + unsigned int aq_vecs; + unsigned int packet_filter; + unsigned int power_state; + u8 port; + struct aq_hw_ops aq_hw_ops; + struct aq_hw_caps_s aq_hw_caps; + struct aq_nic_cfg_s aq_nic_cfg; + struct timer_list service_timer; + struct timer_list polling_timer; + struct aq_hw_link_status_s link_status; + struct { + u32 count; + u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; + } mc_list; +}; + +#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \ + AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \ + AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW) + +#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \ + AQ_NIC_LINK_DOWN) + +#endif /* AQ_NIC_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c new file mode 100644 index 000000000000..581de71a958a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -0,0 +1,292 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_pci_func.c: Definition of PCI functions. 
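aq_nic_set_link_ksettings() earlier in aq_nic.c translates an ethtool speed into one of the AQ_NIC_RATE_* bits defined in aq_nic.h above and rejects the request unless that bit is present in the adapter's link_speed_msk; autoneg simply advertises the whole capability mask. The mapping can be exercised on its own; the capability mask below is a made-up example.

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Same bit layout as aq_nic.h. */
#define AQ_NIC_RATE_10G  BIT(0)
#define AQ_NIC_RATE_5G   BIT(1)
#define AQ_NIC_RATE_2GS  BIT(3)
#define AQ_NIC_RATE_1G   BIT(4)
#define AQ_NIC_RATE_100M BIT(5)

/* Map an ethtool speed (Mbit/s) to a rate bit; 0 means "unsupported". */
static unsigned int speed_to_rate(unsigned int speed)
{
        switch (speed) {
        case 100:   return AQ_NIC_RATE_100M;
        case 1000:  return AQ_NIC_RATE_1G;
        case 2500:  return AQ_NIC_RATE_2GS;
        case 5000:  return AQ_NIC_RATE_5G;
        case 10000: return AQ_NIC_RATE_10G;
        default:    return 0;
        }
}

int main(void)
{
        /* Hypothetical adapter supporting 1G/2.5G/5G/10G but not 100M. */
        unsigned int link_speed_msk = AQ_NIC_RATE_10G | AQ_NIC_RATE_5G |
                                      AQ_NIC_RATE_2GS | AQ_NIC_RATE_1G;
        unsigned int rate = speed_to_rate(100);

        if (!rate || !(link_speed_msk & rate))
                puts("100 Mbit/s: rejected, not in link_speed_msk");

        rate = speed_to_rate(10000);
        if (rate && (link_speed_msk & rate))
                puts("10 Gbit/s: accepted");
        return 0;
}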
*/ + +#include "aq_pci_func.h" +#include "aq_nic.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include <linux/interrupt.h> + +struct aq_pci_func_s { + struct pci_dev *pdev; + struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS]; + void __iomem *mmio; + void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS]; + resource_size_t mmio_pa; + unsigned int msix_entry_mask; + unsigned int ports; + bool is_pci_enabled; + bool is_regions; + bool is_pci_using_dac; + struct aq_hw_caps_s aq_hw_caps; +}; + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops) +{ + struct aq_pci_func_s *self = NULL; + int err = 0; + unsigned int port = 0U; + + if (!aq_hw_ops) { + err = -EFAULT; + goto err_exit; + } + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + pci_set_drvdata(pdev, self); + self->pdev = pdev; + + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + self->ports = self->aq_hw_caps.ports; + + for (port = 0; port < self->ports; ++port) { + struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, + &pdev->dev, self, + port, aq_hw_ops); + + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + self->port[port] = aq_nic; + } + +err_exit: + if (err < 0) { + if (self) + aq_pci_func_free(self); + self = NULL; + } + + (void)err; + return self; +} + +int aq_pci_func_init(struct aq_pci_func_s *self) +{ + int err = 0; + unsigned int bar = 0U; + unsigned int port = 0U; + + err = pci_enable_device(self->pdev); + if (err < 0) + goto err_exit; + + self->is_pci_enabled = true; + + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64)); + self->is_pci_using_dac = 1; + } + if (err) { + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(self->pdev, + DMA_BIT_MASK(32)); + self->is_pci_using_dac = 0; + } + if (err != 0) { + err = -ENOSR; + goto err_exit; + } + + err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio"); + if (err < 0) + goto err_exit; + + self->is_regions = true; + + pci_set_master(self->pdev); + + for (bar = 0; bar < 4; ++bar) { + if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) { + resource_size_t reg_sz; + + self->mmio_pa = pci_resource_start(self->pdev, bar); + if (self->mmio_pa == 0U) { + err = -EIO; + goto err_exit; + } + + reg_sz = pci_resource_len(self->pdev, bar); + if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { + err = -EIO; + goto err_exit; + } + + self->mmio = ioremap_nocache(self->mmio_pa, reg_sz); + if (!self->mmio) { + err = -EIO; + goto err_exit; + } + break; + } + } + + /*enable interrupts */ +#if !AQ_CFG_FORCE_LEGACY_INT + err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, + self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX); + + if (err < 0) { + err = pci_alloc_irq_vectors(self->pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (err < 0) + goto err_exit; + } +#endif + + /* net device init */ + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + err = aq_nic_cfg_start(self->port[port]); + if (err < 0) + goto err_exit; + + err = aq_nic_ndev_init(self->port[port]); + if (err < 0) + goto err_exit; + + err = aq_nic_ndev_register(self->port[port]); + if (err < 0) + goto err_exit; + } + +err_exit: + if (err < 0) + aq_pci_func_deinit(self); + return err; +} + +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + 
char *name, void *aq_vec, cpumask_t *affinity_mask) +{ + struct pci_dev *pdev = self->pdev; + int err = 0; + + if (pdev->msix_enabled || pdev->msi_enabled) + err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0, + name, aq_vec); + else + err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy, + IRQF_SHARED, name, aq_vec); + + if (err >= 0) { + self->msix_entry_mask |= (1 << i); + self->aq_vec[i] = aq_vec; + + if (pdev->msix_enabled) + irq_set_affinity_hint(pci_irq_vector(pdev, i), + affinity_mask); + } + + return err; +} + +void aq_pci_func_free_irqs(struct aq_pci_func_s *self) +{ + struct pci_dev *pdev = self->pdev; + unsigned int i = 0U; + + for (i = 32U; i--;) { + if (!((1U << i) & self->msix_entry_mask)) + continue; + + free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]); + if (pdev->msix_enabled) + irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL); + self->msix_entry_mask &= ~(1U << i); + } +} + +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self) +{ + return self->mmio; +} + +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self) +{ + if (self->pdev->msix_enabled) + return AQ_HW_IRQ_MSIX; + if (self->pdev->msi_enabled) + return AQ_HW_IRQ_MSIX; + return AQ_HW_IRQ_LEGACY; +} + +void aq_pci_func_deinit(struct aq_pci_func_s *self) +{ + if (!self) + goto err_exit; + + aq_pci_func_free_irqs(self); + pci_free_irq_vectors(self->pdev); + + if (self->is_regions) + pci_release_regions(self->pdev); + + if (self->is_pci_enabled) + pci_disable_device(self->pdev); + +err_exit:; +} + +void aq_pci_func_free(struct aq_pci_func_s *self) +{ + unsigned int port = 0U; + + if (!self) + goto err_exit; + + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + aq_nic_ndev_free(self->port[port]); + } + + kfree(self); + +err_exit:; +} + +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg) +{ + int err = 0; + unsigned int port = 0U; + + if (!self) { + err = -EFAULT; + goto err_exit; + } + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + (void)aq_nic_change_pm_state(self->port[port], pm_msg); + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h new file mode 100644 index 000000000000..ecb033791203 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_pci_func.h: Declaration of PCI functions. 
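+ *
+ * aq_pci_func_alloc_irq() and aq_pci_func_free_irqs() are meant to be used
+ * as a pair: the former requests vector i and records it in an internal
+ * mask (also setting the CPU affinity hint under MSI-X), the latter walks
+ * that mask and releases every vector that was requested.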
*/ + +#ifndef AQ_PCI_FUNC_H +#define AQ_PCI_FUNC_H + +#include "aq_common.h" + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops); +int aq_pci_func_init(struct aq_pci_func_s *self); +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + char *name, void *aq_vec, + cpumask_t *affinity_mask); +void aq_pci_func_free_irqs(struct aq_pci_func_s *self); +int aq_pci_func_start(struct aq_pci_func_s *self); +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self); +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self); +void aq_pci_func_deinit(struct aq_pci_func_s *self); +void aq_pci_func_free(struct aq_pci_func_s *self); +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg); + +#endif /* AQ_PCI_FUNC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c new file mode 100644 index 000000000000..0358e6072d45 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -0,0 +1,326 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.c: Definition of functions for Rx/Tx rings. */ + +#include "aq_ring.h" +#include "aq_nic.h" +#include "aq_hw.h" + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic) +{ + int err = 0; + + self->buff_ring = + kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL); + + if (!self->buff_ring) { + err = -ENOMEM; + goto err_exit; + } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), + self->size * self->dx_size, + &self->dx_ring_pa, GFP_KERNEL); + if (!self->dx_ring) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->txds; + self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->rxds; + self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +int aq_ring_init(struct aq_ring_s *self) +{ + self->hw_head = 0; + self->sw_head = 0; + self->sw_tail = 0; + return 0; +} + +void aq_ring_tx_clean(struct aq_ring_s *self) +{ + struct device *dev = aq_nic_get_dev(self->aq_nic); + + for (; self->sw_head != self->hw_head; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = 
&self->buff_ring[self->sw_head]; + + if (likely(buff->is_mapped)) { + if (unlikely(buff->is_sop)) + dma_unmap_single(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + } + + if (unlikely(buff->is_eop)) + dev_kfree_skb_any(buff->skb); + } +} + +static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, + unsigned int t) +{ + return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t)); +} + +#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) +{ + struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); + int err = 0; + bool is_rsc_completed = true; + + for (; (self->sw_head != self->hw_head) && budget; + self->sw_head = aq_ring_next_dx(self, self->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + unsigned int i = 0U; + struct aq_ring_buff_s *buff_ = NULL; + + if (buff->is_error) { + __free_pages(buff->page, 0); + continue; + } + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + for (next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_]) { + is_rsc_completed = + aq_ring_dx_in_range(self->sw_head, + next_, + self->hw_head); + + if (unlikely(!is_rsc_completed)) { + is_rsc_completed = false; + break; + } + + if (buff_->is_eop) + break; + } + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + } + + /* for single fragment packets use build_skb() */ + if (buff->is_eop) { + skb = build_skb(page_address(buff->page), + buff->len + AQ_SKB_ALIGN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + + skb_put(skb, buff->len); + } else { + skb = netdev_alloc_skb(ndev, ETH_HLEN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + skb_put(skb, ETH_HLEN); + memcpy(skb->data, page_address(buff->page), ETH_HLEN); + + skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN, + buff->len - ETH_HLEN, + SKB_TRUESIZE(buff->len - ETH_HLEN)); + + for (i = 1U, next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_], ++i) { + skb_add_rx_frag(skb, i, buff_->page, 0, + buff_->len, + SKB_TRUESIZE(buff->len - + ETH_HLEN)); + buff_->is_cleaned = 1; + + if (buff_->is_eop) + break; + } + } + + skb->protocol = eth_type_trans(skb, ndev); + if (unlikely(buff->is_cso_err)) { + ++self->stats.rx.errors; + __skb_mark_checksum_bad(skb); + } else { + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } + } + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + + skb_record_rx_queue(skb, self->idx); + + netif_receive_skb(skb); + + ++self->stats.rx.packets; + self->stats.rx.bytes += skb->len; + } + +err_exit: + return err; +} + +int aq_ring_rx_fill(struct aq_ring_s *self) +{ + unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + + (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 
1 : 0)) - 1; + struct aq_ring_buff_s *buff = NULL; + int err = 0; + int i = 0; + + for (i = aq_ring_avail_dx(self); i--; + self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { + buff = &self->buff_ring[self->sw_tail]; + + buff->flags = 0U; + buff->len = AQ_CFG_RX_FRAME_MAX; + + buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | + __GFP_COMP, pages_order); + if (!buff->page) { + err = -ENOMEM; + goto err_exit; + } + + buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic), + buff->page, 0, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) { + err = -ENOMEM; + goto err_exit; + } + + buff = NULL; + } + +err_exit: + if (err < 0) { + if (buff && buff->page) + __free_pages(buff->page, 0); + } + + return err; +} + +void aq_ring_rx_deinit(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + for (; self->sw_head != self->sw_tail; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + + dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + __free_pages(buff->page, 0); + } + +err_exit:; +} + +void aq_ring_free(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + kfree(self->buff_ring); + + if (self->dx_ring) + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size, self->dx_ring, + self->dx_ring_pa); + +err_exit:; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h new file mode 100644 index 000000000000..257254645068 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -0,0 +1,153 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */ + +#ifndef AQ_RING_H +#define AQ_RING_H + +#include "aq_common.h" + +struct page; + +/* TxC SOP DX EOP + * +----------+----------+----------+----------- + * 8bytes|len l3,l4 | pa | pa | pa + * +----------+----------+----------+----------- + * 4/8bytes|len pkt |len pkt | | skb + * +----------+----------+----------+----------- + * 4/8bytes|is_txc |len,flags |len |len,is_eop + * +----------+----------+----------+----------- + * + * This aq_ring_buff_s doesn't have endianness dependency. + * It is __packed for cache line optimizations. 
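+ *
+ * The trailing anonymous union in aq_ring_buff_s overlays the individual
+ * state bits (is_sop, is_eop, is_txc, is_mapped, ...) with a single
+ * 32-bit "flags" word, so a buffer can be reset in one store
+ * (see "buff->flags = 0U" in aq_ring_rx_fill()).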
+ */ +struct __packed aq_ring_buff_s { + union { + /* RX */ + struct { + u32 rss_hash; + u16 next; + u8 is_hash_l4; + u8 rsvd1; + struct page *page; + }; + /* EOP */ + struct { + dma_addr_t pa_eop; + struct sk_buff *skb; + }; + /* DX */ + struct { + dma_addr_t pa; + }; + /* SOP */ + struct { + dma_addr_t pa_sop; + u32 len_pkt_sop; + }; + /* TxC */ + struct { + u32 mss; + u8 len_l2; + u8 len_l3; + u8 len_l4; + u8 rsvd2; + u32 len_pkt; + }; + }; + union { + struct { + u32 len:16; + u32 is_ip_cso:1; + u32 is_udp_cso:1; + u32 is_tcp_cso:1; + u32 is_cso_err:1; + u32 is_sop:1; + u32 is_eop:1; + u32 is_txc:1; + u32 is_mapped:1; + u32 is_cleaned:1; + u32 is_error:1; + u32 rsvd3:6; + }; + u32 flags; + }; +}; + +struct aq_ring_stats_rx_s { + u64 errors; + u64 packets; + u64 bytes; + u64 lro_packets; + u64 jumbo_packets; +}; + +struct aq_ring_stats_tx_s { + u64 errors; + u64 packets; + u64 bytes; +}; + +union aq_ring_stats_s { + struct aq_ring_stats_rx_s rx; + struct aq_ring_stats_tx_s tx; +}; + +struct aq_ring_s { + struct aq_obj_s header; + struct aq_ring_buff_s *buff_ring; + u8 *dx_ring; /* descriptors ring, dma shared mem */ + struct aq_nic_s *aq_nic; + unsigned int idx; /* for HW layer registers operations */ + unsigned int hw_head; + unsigned int sw_head; + unsigned int sw_tail; + unsigned int size; /* descriptors number */ + unsigned int dx_size; /* TX or RX descriptor size, */ + /* stored here for fater math */ + union aq_ring_stats_s stats; + dma_addr_t dx_ring_pa; +}; + +struct aq_ring_param_s { + unsigned int vec_idx; + unsigned int cpu; + cpumask_t affinity_mask; +}; + +static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self, + unsigned int dx) +{ + return (++dx >= self->size) ? 0U : dx; +} + +static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self) +{ + return (((self->sw_tail >= self->sw_head)) ? + (self->size - 1) - self->sw_tail + self->sw_head : + self->sw_head - self->sw_tail - 1); +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_ring_init(struct aq_ring_s *self); +void aq_ring_rx_deinit(struct aq_ring_s *self); +void aq_ring_free(struct aq_ring_s *self); +void aq_ring_tx_clean(struct aq_ring_s *self); +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); +int aq_ring_rx_fill(struct aq_ring_s *self); + +#endif /* AQ_RING_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h new file mode 100644 index 000000000000..1db6eb20a8f2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h @@ -0,0 +1,26 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_rss.h: Receive Side Scaling definitions. 
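+ *
+ * hash_secret_key[] is sized in 32-bit words from AQ_CFG_RSS_HASHKEY_SIZE
+ * (a byte count), and indirection_table[] holds one entry per RSS
+ * redirection slot, up to AQ_CFG_RSS_INDIRECTION_TABLE_MAX.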
*/ + +#ifndef AQ_RSS_H +#define AQ_RSS_H + +#include "aq_common.h" +#include "aq_cfg.h" + +struct aq_rss_parameters { + u16 base_cpu_number; + u16 indirection_table_size; + u16 hash_secret_key_size; + u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)]; + u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX]; +}; + +#endif /* AQ_RSS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h new file mode 100644 index 000000000000..f6012b34abe6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h @@ -0,0 +1,49 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_utils.h: Useful macro and structures used in all layers of driver. */ + +#ifndef AQ_UTILS_H +#define AQ_UTILS_H + +#include "aq_common.h" + +#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) + +struct aq_obj_s { + spinlock_t lock; /* spinlock for nic/rings processing */ + atomic_t flags; +}; + +static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old | (mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old & ~(mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask) +{ + return atomic_read(flags) & mask; +} + +#endif /* AQ_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c new file mode 100644 index 000000000000..ad5b4d4dac7f --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -0,0 +1,396 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings. + * Definition of functions for Rx and Tx rings. Friendly module for aq_nic. 
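+ *
+ * Each aq_vec_s owns one NAPI context and a Tx/Rx ring pair per traffic
+ * class (ring[tc][AQ_VEC_TX_ID] / ring[tc][AQ_VEC_RX_ID]).  The interrupt
+ * path implemented below, in short:
+ *
+ *	aq_vec_isr() or aq_vec_isr_legacy()
+ *	  -> napi_schedule(&self->napi)
+ *	    -> aq_vec_poll(): clean Tx, receive and refill Rx, then
+ *	       napi_complete_done() and re-enable the vector's interrupt.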
+ */ + +#include "aq_vec.h" +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_hw.h" + +#include <linux/netdevice.h> + +struct aq_vec_s { + struct aq_obj_s header; + struct aq_hw_ops *aq_hw_ops; + struct aq_hw_s *aq_hw; + struct aq_nic_s *aq_nic; + unsigned int tx_rings; + unsigned int rx_rings; + struct aq_ring_param_s aq_ring_param; + struct napi_struct napi; + struct aq_ring_s ring[AQ_CFG_TCS_MAX][2]; +}; + +#define AQ_VEC_TX_ID 0 +#define AQ_VEC_RX_ID 1 + +static int aq_vec_poll(struct napi_struct *napi, int budget) +__releases(&self->lock) +__acquires(&self->lock) +{ + struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); + struct aq_ring_s *ring = NULL; + int work_done = 0; + int err = 0; + unsigned int i = 0U; + unsigned int sw_tail_old = 0U; + bool was_tx_cleaned = false; + + if (!self) { + err = -EINVAL; + } else if (spin_trylock(&self->header.lock)) { + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + if (self->aq_hw_ops->hw_ring_tx_head_update) { + err = self->aq_hw_ops->hw_ring_tx_head_update( + self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + } + + if (ring[AQ_VEC_TX_ID].sw_head != + ring[AQ_VEC_TX_ID].hw_head) { + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + + if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) > + AQ_CFG_SKB_FRAGS_MAX) { + aq_nic_ndev_queue_start(self->aq_nic, + ring[AQ_VEC_TX_ID].idx); + } + was_tx_cleaned = true; + } + + err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + if (ring[AQ_VEC_RX_ID].sw_head != + ring[AQ_VEC_RX_ID].hw_head) { + err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], + &work_done, + budget - work_done); + if (err < 0) + goto err_exit; + + sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill( + self->aq_hw, + &ring[AQ_VEC_RX_ID], sw_tail_old); + if (err < 0) + goto err_exit; + } + } + + if (was_tx_cleaned) + work_done = budget; + + if (work_done < budget) { + napi_complete_done(napi, work_done); + self->aq_hw_ops->hw_irq_enable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + } + +err_exit: + spin_unlock(&self->header.lock); + } + + return work_done; +} + +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + struct aq_vec_s *self = NULL; + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + self->aq_nic = aq_nic; + self->aq_ring_param.vec_idx = idx; + self->aq_ring_param.cpu = + idx + aq_nic_cfg->aq_rss.base_cpu_number; + + cpumask_set_cpu(self->aq_ring_param.cpu, + &self->aq_ring_param.affinity_mask); + + self->tx_rings = 0; + self->rx_rings = 0; + + netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, + aq_vec_poll, AQ_CFG_NAPI_WEIGHT); + + for (i = 0; i < aq_nic_cfg->tcs; ++i) { + unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic, + self->tx_rings, + self->aq_ring_param.vec_idx); + + ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->tx_rings; + + aq_nic_set_tx_ring(aq_nic, idx_ring, ring); + + ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->rx_rings; + } + +err_exit: + if (err < 0) { + aq_vec_free(self); + self = 
NULL; + } + return self; +} + +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct aq_hw_s *aq_hw) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self->aq_hw_ops = aq_hw_ops; + self->aq_hw = aq_hw; + + spin_lock_init(&self->header.lock); + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = aq_ring_init(&ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw, + &ring[AQ_VEC_TX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_init(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw, + &ring[AQ_VEC_RX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw, + &ring[AQ_VEC_RX_ID], 0U); + if (err < 0) + goto err_exit; + } + +err_exit: + return err; +} + +int aq_vec_start(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + } + + napi_enable(&self->napi); + +err_exit: + return err; +} + +void aq_vec_stop(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + + self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + } + + napi_disable(&self->napi); +} + +void aq_vec_deinit(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); + } +err_exit:; +} + +void aq_vec_free(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_free(&ring[AQ_VEC_TX_ID]); + aq_ring_free(&ring[AQ_VEC_RX_ID]); + } + + netif_napi_del(&self->napi); + + kfree(self); + +err_exit:; +} + +irqreturn_t aq_vec_isr(int irq, void *private) +{ + struct aq_vec_s *self = private; + int err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + napi_schedule(&self->napi); + +err_exit: + return err >= 0 ? IRQ_HANDLED : IRQ_NONE; +} + +irqreturn_t aq_vec_isr_legacy(int irq, void *private) +{ + struct aq_vec_s *self = private; + u64 irq_mask = 0U; + irqreturn_t err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask); + if (err < 0) + goto err_exit; + + if (irq_mask) { + self->aq_hw_ops->hw_irq_disable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + napi_schedule(&self->napi); + } else { + self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U); + err = IRQ_NONE; + } + +err_exit: + return err >= 0 ? 
IRQ_HANDLED : IRQ_NONE; +} + +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self) +{ + return &self->aq_ring_param.affinity_mask; +} + +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx) +{ + struct aq_ring_s *ring = NULL; + unsigned int r = 0U; + + for (r = 0U, ring = self->ring[0]; + self->tx_rings > r; ++r, ring = self->ring[r]) { + struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; + struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx; + + stats_rx->packets += rx->packets; + stats_rx->bytes += rx->bytes; + stats_rx->errors += rx->errors; + stats_rx->jumbo_packets += rx->jumbo_packets; + stats_rx->lro_packets += rx->lro_packets; + + stats_tx->packets += tx->packets; + stats_tx->bytes += tx->bytes; + stats_tx->errors += tx->errors; + } +} + +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) +{ + unsigned int count = 0U; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + aq_vec_add_stats(self, &stats_rx, &stats_tx); + + data[count] += stats_rx.packets; + data[++count] += stats_tx.packets; + data[++count] += stats_rx.jumbo_packets; + data[++count] += stats_rx.lro_packets; + data[++count] += stats_rx.errors; + + if (p_count) + *p_count = ++count; + + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h new file mode 100644 index 000000000000..6c68b184236c --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -0,0 +1,42 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings. + * Declaration of functions for Rx and Tx rings. + */ + +#ifndef AQ_VEC_H +#define AQ_VEC_H + +#include "aq_common.h" +#include <linux/irqreturn.h> + +struct aq_hw_s; +struct aq_hw_ops; +struct aq_ring_stats_rx_s; +struct aq_ring_stats_tx_s; + +irqreturn_t aq_vec_isr(int irq, void *private); +irqreturn_t aq_vec_isr_legacy(int irq, void *private); +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct aq_hw_s *aq_hw); +void aq_vec_deinit(struct aq_vec_s *self); +void aq_vec_free(struct aq_vec_s *self); +int aq_vec_start(struct aq_vec_s *self); +void aq_vec_stop(struct aq_vec_s *self); +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self); +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, + unsigned int *p_count); +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx); + +#endif /* AQ_VEC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c new file mode 100644 index 000000000000..a2b746a2dd50 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -0,0 +1,905 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_a0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_a0_internal.h" + +static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_a0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_a0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_A0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_A0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 
1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX * + HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? + 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_A0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 
1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + rpf_vlan_prom_mode_en_set(self, 1); + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + hw_atl_a0_hw_init_tx_path(self); + hw_atl_a0_hw_init_rx_path(self); + + hw_atl_a0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + reg_tx_dma_debug_ctl_set(self, 0x800000b8U); + reg_tx_dma_debug_ctl_set(self, 0x000000b8U); + + hw_atl_a0_hw_qos_set(self); + hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) | + ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) | + ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U); + + hw_atl_a0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_A0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_A0_TXD_CTL_CMD_TCP | + HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_a0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + 
aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_A0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */ + if ((1U << 4) & + reg_rx_dma_desc_status_get(self, ring->idx)) { + rdm_rx_desc_en_set(self, false, ring->idx); + rdm_rx_desc_res_set(self, true, ring->idx); + rdm_rx_desc_res_set(self, false, ring->idx); + rdm_rx_desc_en_set(self, true, ring->idx); + } + + if (ring->hw_head || + (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) { + break; + } else if (!(rxd_wb->status & 0x1U)) { + struct hw_atl_rxd_wb_s *rxd_wb1 = + (struct hw_atl_rxd_wb_s *) + (&ring->dx_ring[(1U) * + HW_ATL_A0_RXD_SIZE]); + + if 
((rxd_wb1->status & 0x1U)) { + rxd_wb->pkt_len = 1514U; + rxd_wb->status = 3U; + } else { + break; + } + } + } + + buff = &ring->buff_ring[ring->hw_head]; + + if (0x3U != (rxd_wb->status & 0x3U)) + rxd_wb->status |= 4; + + is_err = (0x0000001CU & rxd_wb->status); + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; + } + + is_err &= ~0x18U; + is_err &= ~0x04U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = rxd_wb->pkt_len % + AQ_CFG_RX_FRAME_MAX; + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + /* jumbo */ + buff->next = aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask) | + (1U << HW_ATL_A0_ERR_INT)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + if ((1U << 16) & reg_gen_irq_status_get(self)) + + atomic_inc(&PHAL_ATLANTIC_A0->dpc); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0); + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = + IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { + err = EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_A0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_A0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + if (self->aq_nic_cfg->itr != 0xFFFFU) { + u32 itr_ = (self->aq_nic_cfg->itr >> 1); + + itr_ = min(AQ_CFG_IRQ_MASK, itr_); + + PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | + (itr_ << 0x10); + } else { + u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); + + if (n < self->aq_link_status.mbps) { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } else { + static unsigned int hw_timers_tbl_[] = { + 0x01CU, /* 10Gbit */ + 0x039U, /* 5Gbit */ + 0x039U, /* 5Gbit 5GS */ + 0x073U, /* 2.5Gbit */ + 0x120U, /* 1Gbit */ + 0x1FFU, /* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_A0->itr_rx = + 0x80000000U | + (hw_timers_tbl_[speed_index] << 0x10U); + } + + aq_hw_write_reg(self, 0x00002A00U, 0x40000000U); + aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); + } + } else { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } + + for (i = HW_ATL_A0_RINGS_MAX; i--;) + reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_a0_create, + .destroy = hw_atl_a0_destroy, + .get_hw_caps = hw_atl_a0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_a0_hw_set_speed, + .hw_init = hw_atl_a0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_a0_hw_reset, + .hw_start = hw_atl_a0_hw_start, + .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop, + 
.hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop, + .hw_stop = hw_atl_a0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_a0_hw_irq_enable, + .hw_irq_disable = hw_atl_a0_hw_irq_disable, + .hw_irq_read = hw_atl_a0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_a0_hw_rss_set, + .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 1U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h new file mode 100644 index 000000000000..6e1d527954c9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. + */ + +#ifndef HW_ATL_A0_H +#define HW_ATL_A0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_A0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h new file mode 100644 index 000000000000..1093ea18823a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -0,0 +1,155 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip specific + * constants. 
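+ *
+ * Besides register constants this covers the packed A0 descriptor layouts
+ * (hw_atl_txd_s, hw_atl_rxd_s, hw_atl_rxd_wb_s) and the hw_atl_a0_hw_caps_
+ * capability table that hw_atl_a0_get_hw_caps() copies out.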
+ */ + +#ifndef HW_ATL_A0_INTERNAL_H +#define HW_ATL_A0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_A0_MTU_JUMBO 9014U + +#define HW_ATL_A0_TX_RINGS 4U +#define HW_ATL_A0_RX_RINGS 4U + +#define HW_ATL_A0_RINGS_MAX 32U +#define HW_ATL_A0_TXD_SIZE 16U +#define HW_ATL_A0_RXD_SIZE 16U + +#define HW_ATL_A0_MAC 0U +#define HW_ATL_A0_MAC_MIN 1U +#define HW_ATL_A0_MAC_MAX 33U + +/* interrupts */ +#define HW_ATL_A0_ERR_INT 8U +#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU + +#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U +#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U +#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U + +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U +#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U +#define HW_ATL_A0_TXD_CTL_DD 0x00100000U +#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U + +#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U + +#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_A0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_A0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_A0_RATE_10G BIT(0) +#define HW_ATL_A0_RATE_5G BIT(1) +#define HW_ATL_A0_RATE_2G5 BIT(3) +#define HW_ATL_A0_RATE_1G BIT(4) +#define HW_ATL_A0_RATE_100M BIT(5) + +#define HW_ATL_A0_TXBUF_MAX 160U +#define HW_ATL_A0_RXBUF_MAX 320U + +#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U + +#define HW_ATL_A0_TC_MAX 1U +#define HW_ATL_A0_RSS_MAX 8U + +#define HW_ATL_A0_FW_SEMA_RAM 0x2U + +#define HW_ATL_A0_RXD_DD 0x1U +#define HW_ATL_A0_RXD_NCEA0 0x1U + +#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U + +#define HW_ATL_A0_UCP_0X370_REG 0x370U + +#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_A0_RSS_MAX, + .tcs = HW_ATL_A0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_A0_RXD_SIZE, + .rxds = 248U, + .txd_alignment = 1U, + .txd_size = HW_ATL_A0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_A0_TX_RINGS, + .rx_rings = HW_ATL_A0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_A0_RATE_10G | + HW_ATL_A0_RATE_5G | + HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_A0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_A0_INTERNAL_H */ diff 
--git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c new file mode 100644 index 000000000000..cab2931dab9a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -0,0 +1,958 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_b0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_b0_internal.h" + +static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_b0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_b0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_B0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_B0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + 
rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX * + HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + unsigned int i; + + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + +/* LRO offloads */ + { + unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U : + ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U : + ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0)); + + for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) + rpo_lro_max_num_of_descriptors_set(self, val, i); + + rpo_lro_time_base_divider_set(self, 0x61AU); + rpo_lro_inactive_interval_set(self, 0); + rpo_lro_max_coalescing_interval_set(self, 2); + + rpo_lro_qsessions_lim_set(self, 1U); + + rpo_lro_total_desc_lim_set(self, 2U); + + rpo_lro_patch_optimization_en_set(self, 0U); + + rpo_lro_min_pay_of_first_pkt_set(self, 10U); + + rpo_lro_pkt_lim_set(self, 1U); + + rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); + } + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? 
+ 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_B0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + + if (cfg->vlan_id) { + rpf_vlan_flr_act_set(self, 1U, 0U); + rpf_vlan_id_flr_set(self, 0U, 0U); + rpf_vlan_flr_en_set(self, 0U, 0U); + + rpf_vlan_accept_untagged_packets_set(self, 1U); + rpf_vlan_untagged_act_set(self, 1U); + + rpf_vlan_flr_act_set(self, 1U, 1U); + rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U); + rpf_vlan_flr_en_set(self, 1U, 1U); + } else { + rpf_vlan_prom_mode_en_set(self, 1); + } + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00005040U, + IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U); + + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_B0->chip_features); + + hw_atl_b0_hw_init_tx_path(self); + hw_atl_b0_hw_init_rx_path(self); + + hw_atl_b0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + hw_atl_b0_hw_qos_set(self); + hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U); + + hw_atl_b0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_B0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_B0_TXD_CTL_CMD_TCP | + HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_b0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + 
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_B0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ + break; + } + + buff = &ring->buff_ring[ring->hw_head]; + + is_err = (0x0000003CU & rxd_wb->status); + + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + is_err &= ~0x20U; /* exclude validity bit */ + + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = buff->is_cso_err ? 
0U : 1U; + } + + is_err &= ~0x18U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = rxd_wb->pkt_len % + AQ_CFG_RX_FRAME_MAX; + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status) { + /* LRO */ + buff->next = rxd_wb->next_desc_ptr; + ++ring->stats.rx.lro_packets; + } else { + /* jumbo */ + buff->next = + aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + atomic_inc(&PHAL_ATLANTIC_B0->dpc); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, + IS_FILTER_ENABLED(IFF_MULTICAST), 0); + + rpfl2_accept_all_mc_packets_set(self, + IS_FILTER_ENABLED(IFF_ALLMULTI)); + + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) { + err = -EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_B0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_B0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + tdm_tx_desc_wr_wb_irq_en_set(self, 0U); + tdm_tdm_intr_moder_en_set(self, 1U); + rdm_rx_desc_wr_wb_irq_en_set(self, 0U); + rdm_rdm_intr_moder_en_set(self, 1U); + + PHAL_ATLANTIC_B0->itr_tx = 2U; + PHAL_ATLANTIC_B0->itr_rx = 2U; + + if (self->aq_nic_cfg->itr != 0xFFFFU) { + unsigned int max_timer = self->aq_nic_cfg->itr / 2U; + unsigned int min_timer = self->aq_nic_cfg->itr / 32U; + + max_timer = min(0x1FFU, max_timer); + min_timer = min(0xFFU, min_timer); + + PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; + PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; + } else { + static unsigned int hw_atl_b0_timers_table_tx_[][2] = { + {0xffU, 0xffU}, /* 10Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit 5GS */ + {0xffU, 0x1ffU}, /* 2.5Gbit */ + {0xffU, 0x1ffU}, /* 1Gbit */ + {0xffU, 0x1ffU}, /* 100Mbit */ + }; + + static unsigned int hw_atl_b0_timers_table_rx_[][2] = { + {0x6U, 0x38U},/* 10Gbit */ + {0xCU, 0x70U},/* 5Gbit */ + {0xCU, 0x70U},/* 5Gbit 5GS */ + {0x18U, 0xE0U},/* 2.5Gbit */ + {0x30U, 0x80U},/* 1Gbit */ + {0x4U, 0x50U},/* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [1] << 0x10U; /* set max timer value */ + + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [1] << 0x10U; /* set max timer value */ + } + } else { + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + tdm_tdm_intr_moder_en_set(self, 0U); + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + rdm_rdm_intr_moder_en_set(self, 0U); + PHAL_ATLANTIC_B0->itr_tx = 0U; + PHAL_ATLANTIC_B0->itr_rx = 0U; + } + + for (i = HW_ATL_B0_RINGS_MAX; i--;) { + reg_tx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_tx, i); + reg_rx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_rx, i); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int 
hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_b0_create, + .destroy = hw_atl_b0_destroy, + .get_hw_caps = hw_atl_b0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_b0_hw_set_speed, + .hw_init = hw_atl_b0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_b0_hw_reset, + .hw_start = hw_atl_b0_hw_start, + .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop, + .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop, + .hw_stop = hw_atl_b0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_b0_hw_irq_enable, + .hw_irq_disable = hw_atl_b0_hw_irq_disable, + .hw_irq_read = hw_atl_b0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_b0_hw_rss_set, + .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 2U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h new file mode 100644 index 000000000000..a1e1bce6c1f3 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. 
+ */ + +#ifndef HW_ATL_B0_H +#define HW_ATL_B0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_B0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h new file mode 100644 index 000000000000..8bdee3ddd5a0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -0,0 +1,207 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific + * constants. + */ + +#ifndef HW_ATL_B0_INTERNAL_H +#define HW_ATL_B0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_B0_MTU_JUMBO (16000U) +#define HW_ATL_B0_MTU 1514U + +#define HW_ATL_B0_TX_RINGS 4U +#define HW_ATL_B0_RX_RINGS 4U + +#define HW_ATL_B0_RINGS_MAX 32U +#define HW_ATL_B0_TXD_SIZE (16U) +#define HW_ATL_B0_RXD_SIZE (16U) + +#define HW_ATL_B0_MAC 0U +#define HW_ATL_B0_MAC_MIN 1U +#define HW_ATL_B0_MAC_MAX 33U + +/* UCAST/MCAST filters */ +#define HW_ATL_B0_UCAST_FILTERS_MAX 38 +#define HW_ATL_B0_MCAST_FILTERS_MAX 8 + +/* interrupts */ +#define HW_ATL_B0_ERR_INT 8U +#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU) + +#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000) +#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000) +#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000) + +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001) +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002) +#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0) +#define HW_ATL_B0_TXD_CTL_DD (0x00100000) +#define HW_ATL_B0_TXD_CTL_EOP (0x00200000) + +#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000) + +#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_B0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_B0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_B0_RATE_10G BIT(0) +#define HW_ATL_B0_RATE_5G BIT(1) +#define HW_ATL_B0_RATE_2G5 BIT(3) +#define HW_ATL_B0_RATE_1G BIT(4) +#define HW_ATL_B0_RATE_100M BIT(5) + +#define HW_ATL_B0_TXBUF_MAX 160U +#define HW_ATL_B0_RXBUF_MAX 320U + +#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U +#define HW_ATL_B0_RSS_HASHKEY_BITS 320U + +#define HW_ATL_B0_TCRSS_4_8 1 +#define HW_ATL_B0_TC_MAX 1U +#define HW_ATL_B0_RSS_MAX 8U + +#define HW_ATL_B0_LRO_RXD_MAX 2U +#define HW_ATL_B0_RS_SLIP_ENABLED 0U + +/* (256k -1(max pay_len) - 54(header)) */ +#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U + +/* (256k -1(max pay_len) - 74(header)) */ +#define 
HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U + +#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U +#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU + +#define HW_ATL_B0_FW_SEMA_RAM 0x2U + +#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000) + +#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007) +#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008) +#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0) +#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000) +#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000) + +#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */ +#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LE upper bits */ +#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000) + +#define HW_ATL_B0_RXD_DD (0x1) +#define HW_ATL_B0_RXD_NCEA0 (0x1) + +#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F) +#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0) +#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000) +#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000) +#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000) + +#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001) +#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002) +#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C) +#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004) +#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008) +#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010) +#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0) +#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000) + +#define L2_FILTER_ACTION_DISCARD (0x0) +#define L2_FILTER_ACTION_HOST (0x1) + +#define HW_ATL_B0_UCP_0X370_REG (0x370) + +#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10) + +#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_B0_RSS_MAX, + .tcs = HW_ATL_B0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_B0_RXD_SIZE, + .rxds = 8U * 1024U, + .txd_alignment = 1U, + .txd_size = HW_ATL_B0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_B0_TX_RINGS, + .rx_rings = HW_ATL_B0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_LRO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_B0_RATE_10G | + HW_ATL_B0_RATE_5G | + HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_B0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_B0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c new file mode 100644 index 000000000000..3de651afa8c7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -0,0 +1,1394 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.c: Definitions of bitfield and register access functions for + * Atlantic registers. + */ + +#include "hw_atl_llh.h" +#include "hw_atl_llh_internal.h" +#include "../aq_hw_utils.h" + +/* global */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore) +{ + aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem); +} + +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore) +{ + return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore)); +} + +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr, + glb_reg_res_dis_msk, + glb_reg_res_dis_shift, + glb_reg_res_dis); +} + +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res) +{ + aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk, + glb_soft_res_shift, soft_res); +} + +u32 glb_soft_res_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr, + glb_soft_res_msk, + glb_soft_res_shift); +} + +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr); +} + +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, glb_mif_id_adr); +} + +/* stats */ +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr); +} + +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr); +} + +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr); +} + +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr); +} + +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr); +} + +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr); +} + +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr); +} + +/* interrupt */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw) +{ + aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw); +} + +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; 
+ +/* bitmask for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_msk[32] = { + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U + }; + +/* lower bit position of bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_shift[32] = { + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx], + itr_imr_rxren_msk[rx], + itr_imr_rxren_shift[rx], + irq_map_en_rx); +} + +void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_msk[32] = { + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U + }; + +/* lower bit position of bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_shift[32] = { + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx], + itr_imr_txten_msk[tx], + itr_imr_txten_shift[tx], + irq_map_en_tx); +} + +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_msk[32] = { + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU + }; + +/* lower bit 
position of bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_shift[32] = { + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx], + itr_imr_rxr_msk[rx], + itr_imr_rxr_shift[rx], + irq_map_rx); +} + +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_msk[32] = { + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U + }; + +/* lower bit position of bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_shift[32] = { + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx], + itr_imr_txt_msk[tx], + itr_imr_txt_shift[tx], + irq_map_tx); +} + +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw); +} + +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw) +{ + aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw); +} + +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr, + itr_reg_res_dsbl_msk, + itr_reg_res_dsbl_shift, irq_reg_res_dis); +} + +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw); +} + +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, itr_isrlsw_adr); +} + +u32 itr_res_irq_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift); +} + +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) +{ + aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift, res_irq); +} + +/* rdm */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca), + rdm_dcadcpuid_msk, + rdm_dcadcpuid_shift, cpuid); +} + +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk, + rdm_dca_en_shift, rx_dca_en); +} + +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk, + rdm_dca_mode_shift, rx_dca_mode); +} + +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, 
rdm_descddata_size_adr(descriptor), + rdm_descddata_size_msk, + rdm_descddata_size_shift, + rx_desc_data_buff_size); +} + +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca), + rdm_dcaddesc_en_msk, + rdm_dcaddesc_en_shift, + rx_desc_dca_en); +} + +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor), + rdm_descden_msk, + rdm_descden_shift, + rx_desc_en); +} + +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor), + rdm_descdhdr_size_msk, + rdm_descdhdr_size_shift, + rx_desc_head_buff_size); +} + +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor), + rdm_descdhdr_split_msk, + rdm_descdhdr_split_shift, + rx_desc_head_splitting); +} + +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor), + rdm_descdhd_msk, rdm_descdhd_shift); +} + +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor), + rdm_descdlen_msk, rdm_descdlen_shift, + rx_desc_len); +} + +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor), + rdm_descdreset_msk, rdm_descdreset_shift, + rx_desc_res); +} + +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr, + rdm_int_desc_wrb_en_msk, + rdm_int_desc_wrb_en_shift, + rx_desc_wr_wb_irq_en); +} + +void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca), + rdm_dcadhdr_en_msk, + rdm_dcadhdr_en_shift, + rx_head_dca_en); +} + +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca), + rdm_dcadpay_en_msk, rdm_dcadpay_en_shift, + rx_pld_dca_en); +} + +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr, + rdm_int_rim_en_msk, + rdm_int_rim_en_shift, + rdm_intr_moder_en); +} + +/* reg */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx) +{ + aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map); +} + +u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, gen_intr_stat_adr); +} + +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl) +{ + aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl); +} + +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle) +{ + aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr); +} + +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor), + rx_dma_desc_base_addrlsw); +} + +void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor), + rx_dma_desc_base_addrmsw); +} + +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + 
return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor)); +} + +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor), + rx_dma_desc_tail_ptr); +} + +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk); +} + +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr); +} + +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1) +{ + aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1); +} + +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2) +{ + aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2); +} + +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue), + rx_intr_moderation_ctl); +} + +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl) +{ + aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl); +} + +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor), + tx_dma_desc_base_addrlsw); +} + +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor), + tx_dma_desc_base_addrmsw); +} + +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor), + tx_dma_desc_tail_ptr); +} + +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue), + tx_intr_moderation_ctl); +} + +/* RPB: rx packet buffer */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr, + rpb_dma_sys_lbk_msk, + rpb_dma_sys_lbk_shift, dma_sys_lbk); +} + +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr, + rpb_rpf_rx_tc_mode_msk, + rpb_rpf_rx_tc_mode_shift, + rx_traf_class_mode); +} + +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk, + rpb_rx_buf_en_shift, rx_buff_en); +} + +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer), + rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift, + rx_buff_hi_threshold_per_tc); +} + +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer), + rpb_rxblo_thresh_msk, + rpb_rxblo_thresh_shift, + rx_buff_lo_threshold_per_tc); +} + +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr, + rpb_rx_fc_mode_msk, + rpb_rx_fc_mode_shift, rx_flow_ctl_mode); +} + +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, 
rpb_rxbbuf_size_adr(buffer), + rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift, + rx_pkt_buff_size_per_tc); +} + +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer), + rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift, + rx_xoff_en_per_tc); +} + +/* rpf */ + +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr, + rpfl2bc_thresh_msk, + rpfl2bc_thresh_shift, + l2broadcast_count_threshold); +} + +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk, + rpfl2bc_en_shift, l2broadcast_en); +} + +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk, + rpfl2bc_act_shift, l2broadcast_flr_act); +} + +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter), + rpfl2mc_enf_msk, + rpfl2mc_enf_shift, l2multicast_flr_en); +} + +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr, + rpfl2promis_mode_msk, + rpfl2promis_mode_shift, + l2promiscuous_mode_en); +} + +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter), + rpfl2uc_actf_msk, rpfl2uc_actf_shift, + l2unicast_flr_act); +} + +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter), + rpfl2uc_enf_msk, + rpfl2uc_enf_shift, l2unicast_flr_en); +} + +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter), + l2unicast_dest_addresslsw); +} + +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter), + rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift, + l2unicast_dest_addressmsw); +} + +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr, + rpfl2mc_accept_all_msk, + rpfl2mc_accept_all_shift, + l2_accept_all_mc_packets); +} + +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc) +{ +/* register address for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_adr[8] = { + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U, + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U + }; + +/* bitmask for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_msk[8] = { + 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U, + 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U + }; + +/* lower bit position of bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_shft[8] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc], + rpf_rpb_rx_tc_upt_msk[tc], + rpf_rpb_rx_tc_upt_shft[tc], + user_priority_tc_map); +} + +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr, + rpf_rss_key_addr_msk, + rpf_rss_key_addr_shift, + rss_key_addr); +} + +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 
rss_key_wr_data) +{ + aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr, + rss_key_wr_data); +} + +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift); +} + +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift, + rss_key_wr_en); +} + +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr, + rpf_rss_redir_addr_msk, + rpf_rss_redir_addr_shift, rss_redir_tbl_addr); +} + +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr, + rpf_rss_redir_wr_data_msk, + rpf_rss_redir_wr_data_shift, + rss_redir_tbl_wr_data); +} + +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift); +} + +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift, rss_redir_wr_en); +} + +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr, + rpf_tpo_rpf_sys_lbk_msk, + rpf_tpo_rpf_sys_lbk_shift, + tpo_to_rpf_sys_lbk); +} + +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr, + rpf_vl_inner_tpid_msk, + rpf_vl_inner_tpid_shift, + vlan_inner_etht); +} + +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr, + rpf_vl_outer_tpid_msk, + rpf_vl_outer_tpid_shift, + vlan_outer_etht); +} + +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr, + rpf_vl_promis_mode_msk, + rpf_vl_promis_mode_shift, + vlan_prom_mode_en); +} + +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr, + rpf_vl_accept_untagged_mode_msk, + rpf_vl_accept_untagged_mode_shift, + vlan_accept_untagged_packets); +} + +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr, + rpf_vl_untagged_act_msk, + rpf_vl_untagged_act_shift, + vlan_untagged_act); +} + +void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter), + rpf_vl_en_f_msk, + rpf_vl_en_f_shift, + vlan_flr_en); +} + +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter), + rpf_vl_act_f_msk, + rpf_vl_act_f_shift, + vlan_flr_act); +} + +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter), + rpf_vl_id_f_msk, + rpf_vl_id_f_shift, + vlan_id_flr); +} + +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter), + rpf_et_enf_msk, + rpf_et_enf_shift, etht_flr_en); +} + +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter) +{ + 
aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter), + rpf_et_upfen_msk, rpf_et_upfen_shift, + etht_user_priority_en); +} + +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter), + rpf_et_rxqfen_msk, rpf_et_rxqfen_shift, + etht_rx_queue_en); +} + +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter), + rpf_et_upf_msk, + rpf_et_upf_shift, etht_user_priority); +} + +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter), + rpf_et_rxqf_msk, + rpf_et_rxqf_shift, etht_rx_queue); +} + +void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter), + rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, + etht_mgt_queue); +} + +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter), + rpf_et_actf_msk, + rpf_et_actf_shift, etht_flr_act); +} + +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter), + rpf_et_valf_msk, + rpf_et_valf_shift, etht_flr); +} + +/* RPO: rx packet offload */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr, + rpo_ipv4chk_en_msk, + rpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor), + rpo_descdvl_strip_msk, + rpo_descdvl_strip_shift, + rx_desc_vlan_stripping); +} + +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk, + rpol4chk_en_shift, tcp_udp_crc_offload_en); +} + +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en) +{ + aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en); +} + +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr, + rpo_lro_ptopt_en_msk, + rpo_lro_ptopt_en_shift, + lro_patch_optimization_en); +} + +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr, + rpo_lro_qses_lmt_msk, + rpo_lro_qses_lmt_shift, + lro_qsessions_lim); +} + +void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr, + rpo_lro_tot_dsc_lmt_msk, + rpo_lro_tot_dsc_lmt_shift, + lro_total_desc_lim); +} + +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr, + rpo_lro_pkt_min_msk, + rpo_lro_pkt_min_shift, + lro_min_pld_of_first_pkt); +} + +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim) +{ + aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim); +} + +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_number_of_descriptors, + u32 lro) +{ +/* Register address for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_adr[32] = { + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 
0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU + }; + +/* Bitmask for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_msk[32] = { + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U + }; + +/* Lower bit position of bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_shift[32] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro], + rpo_lro_ldes_max_msk[lro], + rpo_lro_ldes_max_shift[lro], + lro_max_number_of_descriptors); +} + +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr, + rpo_lro_tb_div_msk, + rpo_lro_tb_div_shift, + lro_time_base_divider); +} + +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr, + rpo_lro_ina_ival_msk, + rpo_lro_ina_ival_shift, + lro_inactive_interval); +} + +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr, + rpo_lro_max_ival_msk, + rpo_lro_max_ival_shift, + lro_max_coalescing_interval); +} + +/* rx */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr, + rx_reg_res_dsbl_msk, + rx_reg_res_dsbl_shift, + rx_reg_res_dis); +} + +/* tdm */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca), + tdm_dcadcpuid_msk, + tdm_dcadcpuid_shift, cpuid); +} + +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en) +{ + aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en); +} + +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk, + tdm_dca_en_shift, tx_dca_en); +} + +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk, + tdm_dca_mode_shift, tx_dca_mode); +} + +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca), + tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift, + tx_desc_dca_en); +} + +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor), + tdm_descden_msk, + tdm_descden_shift, + tx_desc_en); +} + +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor), + tdm_descdhd_msk, tdm_descdhd_shift); +} + +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, 
tdm_descdlen_adr(descriptor), + tdm_descdlen_msk, + tdm_descdlen_shift, + tx_desc_len); +} + +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr, + tdm_int_desc_wrb_en_msk, + tdm_int_desc_wrb_en_shift, + tx_desc_wr_wb_irq_en); +} + +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor), + tdm_descdwrb_thresh_msk, + tdm_descdwrb_thresh_shift, + tx_desc_wr_wb_threshold); +} + +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr, + tdm_int_mod_en_msk, + tdm_int_mod_en_shift, + tdm_irq_moderation_en); +} + +/* thm */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr, + thm_lso_tcp_flag_first_msk, + thm_lso_tcp_flag_first_shift, + lso_tcp_flag_of_first_pkt); +} + +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr, + thm_lso_tcp_flag_last_msk, + thm_lso_tcp_flag_last_shift, + lso_tcp_flag_of_last_pkt); +} + +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr, + thm_lso_tcp_flag_mid_msk, + thm_lso_tcp_flag_mid_shift, + lso_tcp_flag_of_middle_pkt); +} + +/* TPB: tx packet buffer */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk, + tpb_tx_buf_en_shift, tx_buff_en); +} + +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer), + tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift, + tx_buff_hi_threshold_per_tc); +} + +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer), + tpb_txblo_thresh_msk, tpb_txblo_thresh_shift, + tx_buff_lo_threshold_per_tc); +} + +void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr, + tpb_dma_sys_lbk_msk, + tpb_dma_sys_lbk_shift, + tx_dma_sys_lbk_en); +} + +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer), + tpb_txbbuf_size_msk, + tpb_txbbuf_size_shift, + tx_pkt_buff_size_per_tc); +} + +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr, + tpb_tx_scp_ins_en_msk, + tpb_tx_scp_ins_en_shift, + tx_path_scp_ins_en); +} + +/* TPO: tx packet offload */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr, + tpo_ipv4chk_en_msk, + tpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr, + tpol4chk_en_msk, + tpol4chk_en_shift, + tcp_udp_crc_offload_en); +} + +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, 
tpo_pkt_sys_lbk_adr, + tpo_pkt_sys_lbk_msk, + tpo_pkt_sys_lbk_shift, + tx_pkt_sys_lbk_en); +} + +/* TPS: tx packet scheduler */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr, + tps_data_tc_arb_mode_msk, + tps_data_tc_arb_mode_shift, + tx_pkt_shed_data_arb_mode); +} + +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr, + tps_desc_rate_ta_rst_msk, + tps_desc_rate_ta_rst_shift, + curr_time_res); +} + +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr, + tps_desc_rate_lim_msk, + tps_desc_rate_lim_shift, + tx_pkt_shed_desc_rate_lim); +} + +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr, + tps_desc_tc_arb_mode_msk, + tps_desc_tc_arb_mode_shift, + tx_pkt_shed_desc_tc_arb_mode); +} + +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc), + tps_desc_tctcredit_max_msk, + tps_desc_tctcredit_max_shift, + tx_pkt_shed_desc_tc_max_credit); +} + +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc), + tps_desc_tctweight_msk, + tps_desc_tctweight_shift, + tx_pkt_shed_desc_tc_weight); +} + +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr, + tps_desc_vm_arb_mode_msk, + tps_desc_vm_arb_mode_shift, + tx_pkt_shed_desc_vm_arb_mode); +} + +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc), + tps_data_tctcredit_max_msk, + tps_data_tctcredit_max_shift, + tx_pkt_shed_tc_data_max_credit); +} + +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc), + tps_data_tctweight_msk, + tps_data_tctweight_shift, + tx_pkt_shed_tc_data_weight); +} + +/* tx */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr, + tx_reg_res_dsbl_msk, + tx_reg_res_dsbl_shift, tx_reg_res_dis); +} + +/* msm */ +u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr, + msm_reg_access_busy_msk, + msm_reg_access_busy_shift); +} + +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr, + msm_reg_addr_msk, + msm_reg_addr_shift, + reg_addr_for_indirect_addr); +} + +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr, + msm_reg_rd_strobe_msk, + msm_reg_rd_strobe_shift, + reg_rd_strobe); +} + +u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr); +} + +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data) +{ + aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data); +} + +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 
reg_wr_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr, + msm_reg_wr_strobe_msk, + msm_reg_wr_strobe_shift, + reg_wr_strobe); +} + +/* pci */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr, + pci_reg_res_dsbl_msk, + pci_reg_res_dsbl_shift, + pci_reg_res_dis); +} + +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp, + u32 scratch_scp) +{ + aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp), + glb_cpu_scratch_scp); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h new file mode 100644 index 000000000000..ed1085b95adb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -0,0 +1,677 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.h: Declarations of bitfield and register access functions for + * Atlantic registers. + */ + +#ifndef HW_ATL_LLH_H +#define HW_ATL_LLH_H + +#include <linux/types.h> + +struct aq_hw_s; + +/* global */ + +/* set global microprocessor semaphore */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore); + +/* get global microprocessor semaphore */ +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore); + +/* set global register reset disable */ +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis); + +/* set soft reset */ +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res); + +/* get soft reset */ +u32 glb_soft_res_get(struct aq_hw_s *aq_hw); + +/* stats */ + +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter lsw */ +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter lsw */ +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter lsw */ +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter lsw */ +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter msw */ +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter msw */ +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter msw */ +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter msw */ +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get msm rx errors counter register */ +u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast frames counter register */ +u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx multicast frames counter register */ +u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast frames counter register */ +u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast octets counter register 1 */ +u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast octets counter register 0 */ +u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* 
get rx dma statistics counter 7 */ +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); + +/* get msm tx errors counter register */ +u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast frames counter register */ +u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast frames counter register */ +u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast frames counter register */ +u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast octets counter register 1 */ +u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast octets counter register 1 */ +u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast octets counter register 0 */ +u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* get global mif identification */ +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw); + +/* interrupt */ + +/* set interrupt auto mask lsw */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw); + +/* set interrupt mapping enable rx */ +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx); + +/* set interrupt mapping enable tx */ +void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx); + +/* set interrupt mapping rx */ +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx); + +/* set interrupt mapping tx */ +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx); + +/* set interrupt mask clear lsw */ +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw); + +/* set interrupt mask set lsw */ +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw); + +/* set interrupt register reset disable */ +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis); + +/* set interrupt status clear lsw */ +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw); + +/* get interrupt status lsw */ +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw); + +/* get reset interrupt */ +u32 itr_res_irq_get(struct aq_hw_s *aq_hw); + +/* set reset interrupt */ +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); + +/* rdm */ + +/* set cpu id */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set rx dca enable */ +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en); + +/* set rx dca mode */ +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode); + +/* set rx descriptor data buffer size */ +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor); + +/* set rx descriptor dca enable */ +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca); + +/* set rx descriptor enable */ +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor); + +/* set rx descriptor header splitting */ +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor); + +/* get rx descriptor head pointer */ +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx descriptor length */ +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor); + +/* set rx descriptor write-back interrupt enable */ +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en); + +/* set rx header dca enable */ +void 
rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca); + +/* set rx payload dca enable */ +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca); + +/* set rx descriptor header buffer size */ +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor); + +/* set rx descriptor reset */ +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor); + +/* Set RDM Interrupt Moderation Enable */ +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en); + +/* reg */ + +/* set general interrupt mapping register */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx); + +/* get general interrupt status register */ +u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw); + +/* set interrupt global control register */ +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl); + +/* set interrupt throttle register */ +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle); + +/* set rx dma descriptor base address lsw */ +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set rx dma descriptor base address msw */ +void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor); + +/* get rx dma descriptor status register */ +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx dma descriptor tail pointer register */ +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor); + +/* set rx filter multicast filter mask register */ +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk); + +/* set rx filter multicast filter register */ +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter); + +/* set rx filter rss control register 1 */ +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1); + +/* Set RX Filter Control Register 2 */ +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2); + +/* Set RX Interrupt Moderation Control Register */ +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue); + +/* set tx dma debug control */ +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl); + +/* set tx dma descriptor base address lsw */ +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set tx dma descriptor base address msw */ +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor); + +/* set tx dma descriptor tail pointer register */ +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor); + +/* Set TX Interrupt Moderation Control Register */ +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); + +/* set global microprocessor scratch pad */ +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, u32 scratch_scp); + +/* rpb */ + +/* set dma system loopback */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); + +/* set rx traffic class mode */ +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode); + +/* set rx buffer enable */ +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, 
u32 rx_buff_en); + +/* set rx buffer high threshold (per tc) */ +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set rx buffer low threshold (per tc) */ +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set rx flow control mode */ +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode); + +/* set rx packet buffer size (per tc) */ +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, + u32 buffer); + +/* set rx xoff enable (per tc) */ +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer); + +/* rpf */ + +/* set l2 broadcast count threshold */ +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold); + +/* set l2 broadcast enable */ +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en); + +/* set l2 broadcast filter action */ +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act); + +/* set l2 multicast filter enable */ +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter); + +/* set l2 promiscuous mode enable */ +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en); + +/* set l2 unicast filter action */ +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter); + +/* set l2 unicast filter enable */ +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter); + +/* set l2 unicast destination address lsw */ +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter); + +/* set l2 unicast destination address msw */ +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter); + +/* Set L2 Accept all Multicast packets */ +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets); + +/* set user-priority tc mapping */ +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc); + +/* set rss key address */ +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr); + +/* set rss key write data */ +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data); + +/* get rss key write enable */ +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss key write enable */ +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en); + +/* set rss redirection table address */ +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_addr); + +/* set rss redirection table write data */ +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data); + +/* get rss redirection write enable */ +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss redirection write enable */ +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en); + +/* set tpo to rpf system loopback */ +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk); + +/* set vlan inner ethertype */ +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en); 
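(Illustrative aside, not part of the patch: each of these helpers programs a single bitfield, so hardware-specific init code simply composes them. A minimal sketch below, using only accessors declared in this header; hw_atl_vlan_promisc_enable() is a hypothetical wrapper name, and the TPID values are the standard 802.1ad/802.1Q ethertypes assumed here for illustration.)

/* Illustrative only: put the VLAN filter into promiscuous mode and program
 * conventional TPIDs via the accessors declared above. Not driver code.
 */
static void hw_atl_vlan_promisc_enable(struct aq_hw_s *self)
{
	rpf_vlan_outer_etht_set(self, 0x88A8U);	/* 802.1ad outer TPID */
	rpf_vlan_inner_etht_set(self, 0x8100U);	/* 802.1Q inner TPID */
	rpf_vlan_prom_mode_en_set(self, 1U);	/* accept any VLAN tag */
}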
+ +/* Set VLAN untagged action */ +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets); + +/* Set VLAN filter enable */ +void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter); + +/* Set VLAN Filter Action */ +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter); + +/* set ethertype filter enable */ +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter); + +/* set ethertype user-priority enable */ +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter); + +/* set ethertype rx queue enable */ +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); + +/* rpo */ + +/* set ipv4 header checksum offload enable */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set rx descriptor vlan stripping */ +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor); + +/* set tcp/udp checksum offload enable */ +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* Set LRO Patch Optimization Enable. 
*/ +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en); + +/* Set Large Receive Offload Enable */ +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en); + +/* Set LRO Q Sessions Limit */ +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim); + +/* Set LRO Total Descriptor Limit */ +void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim); + +/* Set LRO Min Payload of First Packet */ +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt); + +/* Set LRO Packet Limit */ +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim); + +/* Set LRO Max Number of Descriptors */ +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_desc_num, u32 lro); + +/* Set LRO Time Base Divider */ +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider); + +/*Set LRO Inactive Interval */ +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval); + +/*Set LRO Max Coalescing Interval */ +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval); + +/* rx */ + +/* set rx register reset disable */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis); + +/* tdm */ + +/* set cpu id */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set large send offload enable */ +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en); + +/* set tx descriptor enable */ +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor); + +/* set tx dca enable */ +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en); + +/* set tx dca mode */ +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode); + +/* set tx descriptor dca enable */ +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca); + +/* get tx descriptor head pointer */ +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set tx descriptor length */ +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor); + +/* set tx descriptor write-back interrupt enable */ +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en); + +/* set tx descriptor write-back threshold */ +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor); + +/* Set TDM Interrupt Moderation Enable */ +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en); +/* thm */ + +/* set lso tcp flag of first packet */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt); + +/* set lso tcp flag of last packet */ +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt); + +/* set lso tcp flag of middle packet */ +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt); + +/* tpb */ + +/* set tx buffer enable */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); + +/* set tx buffer high threshold (per tc) */ +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set tx buffer low threshold (per tc) */ +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set tx dma system loopback enable */ +void 
tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en); + +/* set tx packet buffer size (per tc) */ +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer); + +/* set tx path pad insert enable */ +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en); + +/* tpo */ + +/* set ipv4 header checksum offload enable */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set tcp/udp checksum offload enable */ +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* set tx pkt system loopback enable */ +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en); + +/* tps */ + +/* set tx packet scheduler data arbitration mode */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode); + +/* set tx packet scheduler descriptor rate current time reset */ +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res); + +/* set tx packet scheduler descriptor rate limit */ +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim); + +/* set tx packet scheduler descriptor tc arbitration mode */ +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode); + +/* set tx packet scheduler descriptor tc max credit */ +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc); + +/* set tx packet scheduler descriptor tc weight */ +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc); + +/* set tx packet scheduler descriptor vm arbitration mode */ +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode); + +/* set tx packet scheduler tc data max credit */ +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc); + +/* set tx packet scheduler tc data weight */ +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc); + +/* tx */ + +/* set tx register reset disable */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis); + +/* msm */ + +/* get register access status */ +u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw); + +/* set register address for indirect address */ +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr); + +/* set register read strobe */ +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe); + +/* get register read data */ +u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw); + +/* set register write data */ +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data); + +/* set register write strobe */ +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); + +/* pci */ + +/* set pci register reset disable */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); + +#endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h new file mode 100644 index 000000000000..5527fc0e5942 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -0,0 +1,2375 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 
aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh_internal.h: Preprocessor definitions + * for Atlantic registers. + */ + +#ifndef HW_ATL_LLH_INTERNAL_H +#define HW_ATL_LLH_INTERNAL_H + +/* global microprocessor semaphore definitions + * base address: 0x000003a0 + * parameter: semaphore {s} | stride size 0x4 | range [0, 15] + */ +#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4) +/* register address for bitfield rx dma good octet counter lsw [1f:0] */ +#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808 +/* register address for bitfield rx dma good packet counter lsw [1f:0] */ +#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800 +/* register address for bitfield tx dma good octet counter lsw [1f:0] */ +#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808 +/* register address for bitfield tx dma good packet counter lsw [1f:0] */ +#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800 + +/* register address for bitfield rx dma good octet counter msw [3f:20] */ +#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c +/* register address for bitfield rx dma good packet counter msw [3f:20] */ +#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804 +/* register address for bitfield tx dma good octet counter msw [3f:20] */ +#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c +/* register address for bitfield tx dma good packet counter msw [3f:20] */ +#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804 + +/* preprocessor definitions for msm rx errors counter register */ +#define mac_msm_rx_errs_cnt_adr 0x00000120u + +/* preprocessor definitions for msm rx unicast frames counter register */ +#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u + +/* preprocessor definitions for msm rx multicast frames counter register */ +#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u + +/* preprocessor definitions for msm rx broadcast frames counter register */ +#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u + +/* preprocessor definitions for msm rx broadcast octets counter register 1 */ +#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u + +/* preprocessor definitions for msm rx broadcast octets counter register 2 */ +#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u + +/* preprocessor definitions for msm rx unicast octets counter register 0 */ +#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u + +/* preprocessor definitions for rx dma statistics counter 7 */ +#define rx_dma_stat_counter7_adr 0x00006818u + +/* preprocessor definitions for msm tx unicast frames counter register */ +#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u + +/* preprocessor definitions for msm tx multicast frames counter register */ +#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u + +/* preprocessor definitions for global mif identification */ +#define glb_mif_id_adr 0x0000001cu + +/* register address for bitfield iamr_lsw[1f:0] */ +#define itr_iamrlsw_adr 0x00002090 +/* register address for bitfield rx dma drop packet counter [1f:0] */ +#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818 + +/* register address for bitfield imcr_lsw[1f:0] */ +#define itr_imcrlsw_adr 0x00002070 +/* register address for bitfield imsr_lsw[1f:0] */ +#define itr_imsrlsw_adr 0x00002060 +/* register address for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_adr 
0x00002300 +/* bitmask for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_msk 0x20000000 +/* lower bit position of bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_shift 29 +/* register address for bitfield iscr_lsw[1f:0] */ +#define itr_iscrlsw_adr 0x00002050 +/* register address for bitfield isr_lsw[1f:0] */ +#define itr_isrlsw_adr 0x00002000 +/* register address for bitfield itr_reset */ +#define itr_res_adr 0x00002300 +/* bitmask for bitfield itr_reset */ +#define itr_res_msk 0x80000000 +/* lower bit position of bitfield itr_reset */ +#define itr_res_shift 31 +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_msk 0x000000ff +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_shift 0 +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 + +/* rx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_rdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 +/* bitmask for bitfield dca_en */ +#define rdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define rdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define rdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define rdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define rdm_dca_en_default 0x1 + +/* rx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_rdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_adr 0x00006180 +/* bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_default 0x0 + +/* rx desc{d}_data_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_data_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_msk 0x0000001f +/* inverted bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_mskn 0xffffffe0 +/* lower bit position of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_shift 0 +/* width of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_width 5 +/* default value of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_default 0x0 + +/* rx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_default 0x0 + +/* rx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_en */ +#define rdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define rdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define rdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define rdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define rdm_descden_default 0x0 + +/* rx desc{d}_hdr_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_hdr_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_msk 0x00001f00 +/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_mskn 0xffffe0ff +/* lower bit position of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_shift 8 +/* width of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_width 5 +/* default value of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_default 0x0 + +/* rx desc{d}_hdr_split bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_split". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_hdr_split_i[0]" + */ + +/* register address for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_msk 0x10000000 +/* inverted bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_mskn 0xefffffff +/* lower bit position of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_shift 28 +/* width of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_width 1 +/* default value of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_default 0x0 + +/* rx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". 
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="rdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_width 13 + +/* rx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_default 0x0 + +/* rx desc{d}_reset bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_reset". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_q_pf_res_i[0]" + */ + +/* register address for bitfield desc{d}_reset */ +#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_msk 0x02000000 +/* inverted bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_mskn 0xfdffffff +/* lower bit position of bitfield desc{d}_reset */ +#define rdm_descdreset_shift 25 +/* width of bitfield desc{d}_reset */ +#define rdm_descdreset_width 1 +/* default value of bitfield desc{d}_reset */ +#define rdm_descdreset_default 0x0 + +/* rx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". + * port="pif_rdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_adr 0x00005a30 +/* bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_msk 0x00000004 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_mskn 0xfffffffb +/* lower bit position of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_shift 2 +/* width of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_default 0x0 + +/* rx dca{d}_hdr_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_hdr_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_hdr_en_i[0]" + */ + +/* register address for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_msk 0x40000000 +/* inverted bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_mskn 0xbfffffff +/* lower bit position of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_shift 30 +/* width of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_width 1 +/* default value of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_default 0x0 + +/* rx dca{d}_pay_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_pay_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_pay_en_i[0]" + */ + +/* register address for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_msk 0x20000000 +/* inverted bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_mskn 0xdfffffff +/* lower bit position of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_shift 29 +/* width of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_width 1 +/* default value of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_default 0x0 + +/* RX rdm_int_rim_en Bitfield Definitions + * Preprocessor definitions for the bitfield "rdm_int_rim_en". + * PORT="pif_rdm_int_rim_en_i" + */ + +/* Register address for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_adr 0x00005A30 +/* Bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_msk 0x00000008 +/* Inverted bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_mskn 0xFFFFFFF7 +/* Lower bit position of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_shift 3 +/* Width of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_width 1 +/* Default value of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_default 0x0 + +/* general interrupt mapping register definitions + * preprocessor definitions for general interrupt mapping register + * base address: 0x00002180 + * parameter: regidx {f} | stride size 0x4 | range [0, 3] + */ +#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4) + +/* general interrupt status register definitions + * preprocessor definitions for general interrupt status register + * address: 0x000021A0 + */ + +#define gen_intr_stat_adr 0x000021A4U + +/* interrupt global control register definitions + * preprocessor definitions for interrupt global control register + * address: 0x00002300 + */ +#define intr_glb_ctl_adr 0x00002300u + +/* interrupt throttle register definitions + * preprocessor definitions for interrupt throttle register + * base address: 0x00002800 + * parameter: throttle {t} | stride size 0x4 | range [0, 31] + */ +#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4) + +/* rx dma descriptor base address lsw definitions + * preprocessor definitions for rx dma descriptor base address lsw + * base address: 0x00005b00 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrlsw_adr(descriptor) \ +(0x00005b00u + (descriptor) * 0x20) + +/* rx dma descriptor base address msw definitions + * preprocessor definitions for rx dma descriptor base address msw + * base address: 0x00005b04 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrmsw_adr(descriptor) \ +(0x00005b04u + (descriptor) * 0x20) + +/* rx 
dma descriptor status register definitions + * preprocessor definitions for rx dma descriptor status register + * base address: 0x00005b14 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20) + +/* rx dma descriptor tail pointer register definitions + * preprocessor definitions for rx dma descriptor tail pointer register + * base address: 0x00005b10 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20) + +/* rx interrupt moderation control register definitions + * Preprocessor definitions for RX Interrupt Moderation Control Register + * Base Address: 0x00005A40 + * Parameter: RIM {R} | stride size 0x4 | range [0, 31] + */ +#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4) + +/* rx filter multicast filter mask register definitions + * preprocessor definitions for rx filter multicast filter mask register + * address: 0x00005270 + */ +#define rx_flr_mcst_flr_msk_adr 0x00005270u + +/* rx filter multicast filter register definitions + * preprocessor definitions for rx filter multicast filter register + * base address: 0x00005250 + * parameter: filter {f} | stride size 0x4 | range [0, 7] + */ +#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4) + +/* RX Filter RSS Control Register 1 Definitions + * Preprocessor definitions for RX Filter RSS Control Register 1 + * Address: 0x000054C0 + */ +#define rx_flr_rss_control1_adr 0x000054C0u + +/* RX Filter Control Register 2 Definitions + * Preprocessor definitions for RX Filter Control Register 2 + * Address: 0x00005104 + */ +#define rx_flr_control2_adr 0x00005104u + +/* tx tx dma debug control [1f:0] bitfield definitions + * preprocessor definitions for the bitfield "tx dma debug control [1f:0]". + * port="pif_tdm_debug_cntl_i[31:0]" + */ + +/* register address for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_adr 0x00008920 +/* bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_msk 0xffffffff +/* inverted bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_mskn 0x00000000 +/* lower bit position of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_shift 0 +/* width of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_width 32 +/* default value of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_default 0x0 + +/* tx dma descriptor base address lsw definitions + * preprocessor definitions for tx dma descriptor base address lsw + * base address: 0x00007c00 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_base_addrlsw_adr(descriptor) \ + (0x00007c00u + (descriptor) * 0x40) + +/* tx dma descriptor tail pointer register definitions + * preprocessor definitions for tx dma descriptor tail pointer register + * base address: 0x00007c10 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40) + +/* rx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". 
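The descriptor base registers above take the DMA address of the ring as two 32-bit halves. A minimal sketch of pointing one RX ring at its descriptor memory under that assumption; `base` is again the mapped BAR0, the tail-pointer semantics are assumed rather than taken from the header, and the function name is illustrative:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Point RX ring 'ring' at a DMA-coherent descriptor area and publish the
 * initial producer position through the tail pointer. */
static void example_rx_ring_base_set(void __iomem *base, unsigned int ring,
				     dma_addr_t desc_dma, u32 tail)
{
	writel(lower_32_bits(desc_dma),
	       base + rx_dma_desc_base_addrlsw_adr(ring));
	writel(upper_32_bits(desc_dma),
	       base + rx_dma_desc_base_addrmsw_adr(ring));
	writel(tail, base + rx_dma_desc_tail_ptr_adr(ring));
}
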
+ * port="pif_rpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_adr 0x00005000 +/* bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_default 0x0 + +/* rx rx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "rx_tc_mode". + * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i" + */ + +/* register address for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_adr 0x00005700 +/* bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_msk 0x00000100 +/* inverted bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff +/* lower bit position of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_shift 8 +/* width of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_width 1 +/* default value of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_default 0x0 + +/* rx rx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "rx_buf_en". + * port="pif_rpb_rx_buf_en_i" + */ + +/* register address for bitfield rx_buf_en */ +#define rpb_rx_buf_en_adr 0x00005700 +/* bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield rx_buf_en */ +#define rpb_rx_buf_en_shift 0 +/* width of bitfield rx_buf_en */ +#define rpb_rx_buf_en_width 1 +/* default value of bitfield rx_buf_en */ +#define rpb_rx_buf_en_default 0x0 + +/* rx rx{b}_hi_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_hi_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_msk 0x3fff0000 +/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_mskn 0xc000ffff +/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_shift 16 +/* width of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_width 14 +/* default value of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_default 0x0 + +/* rx rx{b}_lo_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_lo_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_msk 0x00003fff +/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_mskn 0xffffc000 +/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_shift 0 +/* width of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_width 14 +/* default value of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_default 0x0 + +/* rx rx_fc_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "rx_fc_mode[1:0]". + * port="pif_rpb_rx_fc_mode_i[1:0]" + */ + +/* register address for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_adr 0x00005700 +/* bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_msk 0x00000030 +/* inverted bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_mskn 0xffffffcf +/* lower bit position of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_shift 4 +/* width of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_width 2 +/* default value of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_default 0x0 + +/* rx rx{b}_buf_size[8:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_buf_size_i[8:0]" + */ + +/* register address for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_msk 0x000001ff +/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_mskn 0xfffffe00 +/* lower bit position of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_shift 0 +/* width of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_width 9 +/* default value of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_default 0x0 + +/* rx rx{b}_xoff_en bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_xoff_en". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx_xoff_en_i[0]" + */ + +/* register address for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_msk 0x80000000 +/* inverted bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_mskn 0x7fffffff +/* lower bit position of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_shift 31 +/* width of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_width 1 +/* default value of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_default 0x0 + +/* rx l2_bc_thresh[f:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]". 
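The per-buffer XOFF enable and the high/low watermarks all live in the same register (0x5714 + buffer * 0x10), so pause thresholds can be programmed with one write. A sketch under that assumption; the function name is illustrative:

#include <linux/io.h>
#include <linux/types.h>

/* Set the XOFF/XON watermarks for RX packet buffer 'buf' and enable
 * flow-control generation for it. */
static void example_rx_buf_flow_ctl(void __iomem *base, unsigned int buf,
				    u32 hi_thresh, u32 lo_thresh)
{
	u32 reg = rpb_rxbxoff_en_msk;

	reg |= (hi_thresh << rpb_rxbhi_thresh_shift) & rpb_rxbhi_thresh_msk;
	reg |= (lo_thresh << rpb_rxblo_thresh_shift) & rpb_rxblo_thresh_msk;
	writel(reg, base + rpb_rxbxoff_en_adr(buf));
}
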
+ * port="pif_rpf_l2_bc_thresh_i[15:0]" + */ + +/* register address for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_adr 0x00005100 +/* bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_msk 0xffff0000 +/* inverted bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_mskn 0x0000ffff +/* lower bit position of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_shift 16 +/* width of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_width 16 +/* default value of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_default 0x0 + +/* rx l2_bc_en bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_en". + * port="pif_rpf_l2_bc_en_i" + */ + +/* register address for bitfield l2_bc_en */ +#define rpfl2bc_en_adr 0x00005100 +/* bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_msk 0x00000001 +/* inverted bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_mskn 0xfffffffe +/* lower bit position of bitfield l2_bc_en */ +#define rpfl2bc_en_shift 0 +/* width of bitfield l2_bc_en */ +#define rpfl2bc_en_width 1 +/* default value of bitfield l2_bc_en */ +#define rpfl2bc_en_default 0x0 + +/* rx l2_bc_act[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_act[2:0]". + * port="pif_rpf_l2_bc_act_i[2:0]" + */ + +/* register address for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_adr 0x00005100 +/* bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_msk 0x00007000 +/* inverted bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_mskn 0xffff8fff +/* lower bit position of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_shift 12 +/* width of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_width 3 +/* default value of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_default 0x0 + +/* rx l2_mc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_mc_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 7] + * port="pif_rpf_l2_mc_en_i[0]" + */ + +/* register address for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4) +/* bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_shift 31 +/* width of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_width 1 +/* default value of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_default 0x0 + +/* rx l2_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "l2_promis_mode". + * port="pif_rpf_l2_promis_mode_i" + */ + +/* register address for bitfield l2_promis_mode */ +#define rpfl2promis_mode_adr 0x00005100 +/* bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_msk 0x00000008 +/* inverted bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_mskn 0xfffffff7 +/* lower bit position of bitfield l2_promis_mode */ +#define rpfl2promis_mode_shift 3 +/* width of bitfield l2_promis_mode */ +#define rpfl2promis_mode_width 1 +/* default value of bitfield l2_promis_mode */ +#define rpfl2promis_mode_default 0x0 + +/* rx l2_uc_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]". 
+ * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_act0_i[2:0]" + */ + +/* register address for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_msk 0x00070000 +/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_mskn 0xfff8ffff +/* lower bit position of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_shift 16 +/* width of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_width 3 +/* default value of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_default 0x0 + +/* rx l2_uc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_en{f}". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_en_i[0]" + */ + +/* register address for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_shift 31 +/* width of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_width 1 +/* default value of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_default 0x0 + +/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */ +#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8) +/* register address for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_msk 0x0000ffff +/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_shift 0 + +/* rx l2_mc_accept_all bitfield definitions + * Preprocessor definitions for the bitfield "l2_mc_accept_all". + * PORT="pif_rpf_l2_mc_all_accept_i" + */ + +/* Register address for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_adr 0x00005270 +/* Bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_msk 0x00004000 +/* Inverted bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_mskn 0xFFFFBFFF +/* Lower bit position of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_shift 14 +/* Width of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_width 1 +/* Default value of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_default 0x0 + +/* width of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_width 3 +/* default value of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_default 0x0 + +/* rx rss_key_addr[4:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_addr[4:0]". + * port="pif_rpf_rss_key_addr_i[4:0]" + */ + +/* register address for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_adr 0x000054d0 +/* bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_msk 0x0000001f +/* inverted bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_mskn 0xffffffe0 +/* lower bit position of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_shift 0 +/* width of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_width 5 +/* default value of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_default 0x0 + +/* rx rss_key_wr_data[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]". 
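A unicast filter is a 48-bit destination address split across the LSW/MSW register pair, with the enable bit sitting in the MSW register. The octet packing below (first two bytes of the MAC in the MSW) is an assumption made for illustration, and the function name is not the driver's:

#include <linux/io.h>
#include <linux/types.h>

static void example_uc_filter_set(void __iomem *base, unsigned int filter,
				  const u8 mac[6])
{
	/* assumed packing: mac[0..1] -> MSW[15:0], mac[2..5] -> LSW[31:0] */
	u32 lsw = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		  ((u32)mac[4] << 8) | mac[5];
	u32 msw = ((u32)mac[0] << 8) | mac[1];

	writel(lsw, base + rpfl2uc_daflsw_adr(filter));
	writel(msw | rpfl2uc_enf_msk, base + rpfl2uc_dafmsw_adr(filter));
}
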
+ * port="pif_rpf_rss_key_wr_data_i[31:0]" + */ + +/* register address for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_adr 0x000054d4 +/* bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_mskn 0x00000000 +/* lower bit position of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_shift 0 +/* width of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_width 32 +/* default value of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_default 0x0 + +/* rx rss_key_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_en_i". + * port="pif_rpf_rss_key_wr_en_i" + */ + +/* register address for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_adr 0x000054d0 +/* bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_msk 0x00000020 +/* inverted bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_mskn 0xffffffdf +/* lower bit position of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_shift 5 +/* width of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_width 1 +/* default value of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_default 0x0 + +/* rx rss_redir_addr[3:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_addr[3:0]". + * port="pif_rpf_rss_redir_addr_i[3:0]" + */ + +/* register address for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_adr 0x000054e0 +/* bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_msk 0x0000000f +/* inverted bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_mskn 0xfffffff0 +/* lower bit position of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_shift 0 +/* width of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_width 4 +/* default value of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_default 0x0 + +/* rx rss_redir_wr_data[f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]". + * port="pif_rpf_rss_redir_wr_data_i[15:0]" + */ + +/* register address for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_adr 0x000054e4 +/* bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_msk 0x0000ffff +/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_mskn 0xffff0000 +/* lower bit position of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_shift 0 +/* width of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_width 16 +/* default value of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_default 0x0 + +/* rx rss_redir_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_en_i". 
+ * port="pif_rpf_rss_redir_wr_en_i" + */ + +/* register address for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_adr 0x000054e0 +/* bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_msk 0x00000010 +/* inverted bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_mskn 0xffffffef +/* lower bit position of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_shift 4 +/* width of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_width 1 +/* default value of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_default 0x0 + +/* rx tpo_rpf_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback". + * port="pif_rpf_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_adr 0x00005000 +/* bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_msk 0x00000100 +/* inverted bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff +/* lower bit position of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_shift 8 +/* width of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_width 1 +/* default value of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_default 0x0 + +/* rx vl_inner_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". + * port="pif_rpf_vl_inner_tpid_i[15:0]" + */ + +/* register address for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_adr 0x00005284 +/* bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_msk 0x0000ffff +/* inverted bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_mskn 0xffff0000 +/* lower bit position of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_shift 0 +/* width of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_width 16 +/* default value of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_default 0x8100 + +/* rx vl_outer_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". + * port="pif_rpf_vl_outer_tpid_i[15:0]" + */ + +/* register address for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_adr 0x00005284 +/* bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_msk 0xffff0000 +/* inverted bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_mskn 0x0000ffff +/* lower bit position of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_shift 16 +/* width of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_width 16 +/* default value of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_default 0x88a8 + +/* rx vl_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "vl_promis_mode". 
+ * port="pif_rpf_vl_promis_mode_i" + */ + +/* register address for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_adr 0x00005280 +/* bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_msk 0x00000002 +/* inverted bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_mskn 0xfffffffd +/* lower bit position of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_shift 1 +/* width of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_width 1 +/* default value of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_default 0x0 + +/* RX vl_accept_untagged_mode Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". + * PORT="pif_rpf_vl_accept_untagged_i" + */ + +/* Register address for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_adr 0x00005280 +/* Bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_msk 0x00000004 +/* Inverted bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB +/* Lower bit position of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_shift 2 +/* Width of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_width 1 +/* Default value of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_default 0x0 + +/* rX vl_untagged_act[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". + * PORT="pif_rpf_vl_untagged_act_i[2:0]" + */ + +/* Register address for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_adr 0x00005280 +/* Bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_msk 0x00000038 +/* Inverted bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_mskn 0xFFFFFFC7 +/* Lower bit position of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_shift 3 +/* Width of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_width 3 +/* Default value of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_default 0x0 + +/* RX vl_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_en_i[0]" + */ + +/* Register address for bitfield vl_en{F} */ +#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield vl_en{F} */ +#define rpf_vl_en_f_shift 31 +/* Width of bitfield vl_en{F} */ +#define rpf_vl_en_f_width 1 +/* Default value of bitfield vl_en{F} */ +#define rpf_vl_en_f_default 0x0 + +/* RX vl_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_act0_i[2:0]" + */ + +/* Register address for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_msk 0x00070000 +/* Inverted bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_mskn 0xFFF8FFFF +/* Lower bit position of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_shift 16 +/* Width of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_width 3 +/* Default value of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_default 0x0 + +/* RX vl_id{F}[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_id0_i[11:0]" + */ + +/* Register address for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_msk 0x00000FFF +/* Inverted bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_mskn 0xFFFFF000 +/* Lower bit position of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_shift 0 +/* Width of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_width 12 +/* Default value of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_default 0x0 + +/* RX et_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "et_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_et_en_i[0]" + */ + +/* Register address for bitfield et_en{F} */ +#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4) +/* Bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield et_en{F} */ +#define rpf_et_en_f_shift 31 +/* Width of bitfield et_en{F} */ +#define rpf_et_en_f_width 1 +/* Default value of bitfield et_en{F} */ +#define rpf_et_en_f_default 0x0 + +/* rx et_en{f} bitfield definitions + * preprocessor definitions for the bitfield "et_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_en_i[0]" + */ + +/* register address for bitfield et_en{f} */ +#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_en{f} */ +#define rpf_et_enf_msk 0x80000000 +/* inverted bitmask for bitfield et_en{f} */ +#define rpf_et_enf_mskn 0x7fffffff +/* lower bit position of bitfield et_en{f} */ +#define rpf_et_enf_shift 31 +/* width of bitfield et_en{f} */ +#define rpf_et_enf_width 1 +/* default value of bitfield et_en{f} */ +#define rpf_et_enf_default 0x0 + +/* rx et_up{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up_en_i[0]" + */ + +/* register address for bitfield et_up{f}_en */ +#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_msk 0x40000000 +/* inverted bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_mskn 0xbfffffff +/* lower bit position of bitfield et_up{f}_en */ +#define rpf_et_upfen_shift 30 +/* width of bitfield et_up{f}_en */ +#define rpf_et_upfen_width 1 +/* default value of bitfield et_up{f}_en */ +#define rpf_et_upfen_default 0x0 + +/* rx et_rxq{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}_en". 
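Each VLAN filter packs its enable bit, action code and 12-bit VLAN ID into one per-filter register, so a filter is programmed with a single write. A sketch with an illustrative function name:

#include <linux/io.h>
#include <linux/types.h>

static void example_vlan_filter_set(void __iomem *base, unsigned int filter,
				    u32 action, u16 vid)
{
	u32 reg = rpf_vl_en_f_msk;		/* vl_en{F}, bit 31 */

	reg |= (action << rpf_vl_act_f_shift) & rpf_vl_act_f_msk;
	reg |= vid & rpf_vl_id_f_msk;
	writel(reg, base + rpf_vl_id_f_adr(filter));
}
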
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq_en_i[0]" + */ + +/* register address for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_msk 0x20000000 +/* inverted bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_mskn 0xdfffffff +/* lower bit position of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_shift 29 +/* width of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_width 1 +/* default value of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_default 0x0 + +/* rx et_up{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up0_i[2:0]" + */ + +/* register address for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_msk 0x1c000000 +/* inverted bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_mskn 0xe3ffffff +/* lower bit position of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_shift 26 +/* width of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_width 3 +/* default value of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_default 0x0 + +/* rx et_rxq{f}[4:0] bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq0_i[4:0]" + */ + +/* register address for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_msk 0x01f00000 +/* inverted bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_mskn 0xfe0fffff +/* lower bit position of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_shift 20 +/* width of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_width 5 +/* default value of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_default 0x0 + +/* rx et_mng_rxq{f} bitfield definitions + * preprocessor definitions for the bitfield "et_mng_rxq{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_mng_rxq_i[0]" + */ + +/* register address for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_msk 0x00080000 +/* inverted bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_mskn 0xfff7ffff +/* lower bit position of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_shift 19 +/* width of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_width 1 +/* default value of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_default 0x0 + +/* rx et_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_act{f}[2:0]". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_act0_i[2:0]" + */ + +/* register address for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_msk 0x00070000 +/* inverted bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_mskn 0xfff8ffff +/* lower bit position of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_shift 16 +/* width of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_width 3 +/* default value of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_default 0x0 + +/* rx et_val{f}[f:0] bitfield definitions + * preprocessor definitions for the bitfield "et_val{f}[f:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_val0_i[15:0]" + */ + +/* register address for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_msk 0x0000ffff +/* inverted bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_mskn 0xffff0000 +/* lower bit position of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_shift 0 +/* width of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_width 16 +/* default value of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_default 0x0 + +/* rx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_rpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_adr 0x00005580 +/* bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_default 0x0 + +/* rx desc{d}_vl_strip bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_vl_strip". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rpo_desc_vl_strip_i[0]" + */ + +/* register address for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_msk 0x20000000 +/* inverted bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_mskn 0xdfffffff +/* lower bit position of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_shift 29 +/* width of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_width 1 +/* default value of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_default 0x0 + +/* rx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_rpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define rpol4chk_en_adr 0x00005580 +/* bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define rpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define rpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define rpol4chk_en_default 0x0 + +/* rx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". 
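The EtherType filters use the same one-register-per-filter layout: the enable bits, priority/queue overrides, action code and 16-bit EtherType value all share 0x5300 + filter * 0x4. A sketch that steers one EtherType to a chosen RX queue; the name and the subset of fields written are illustrative:

#include <linux/io.h>
#include <linux/types.h>

static void example_ethertype_filter_set(void __iomem *base,
					 unsigned int filter, u16 ethertype,
					 u32 rxq, u32 action)
{
	u32 reg = rpf_et_enf_msk | rpf_et_rxqfen_msk;

	reg |= (rxq << rpf_et_rxqf_shift) & rpf_et_rxqf_msk;
	reg |= (action << rpf_et_actf_shift) & rpf_et_actf_msk;
	reg |= ethertype & rpf_et_valf_msk;
	writel(reg, base + rpf_et_valf_adr(filter));
}
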
+ * port="pif_rx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_adr 0x00005000 +/* bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_default 0x1 + +/* tx dca{d}_cpuid[7:0] bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_msk 0x000000ff +/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_mskn 0xffffff00 +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_shift 0 +/* width of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_width 8 +/* default value of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_default 0x0 + +/* tx lso_en[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_en[1f:0]". + * port="pif_tdm_lso_en_i[31:0]" + */ + +/* register address for bitfield lso_en[1f:0] */ +#define tdm_lso_en_adr 0x00007810 +/* bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_msk 0xffffffff +/* inverted bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_mskn 0x00000000 +/* lower bit position of bitfield lso_en[1f:0] */ +#define tdm_lso_en_shift 0 +/* width of bitfield lso_en[1f:0] */ +#define tdm_lso_en_width 32 +/* default value of bitfield lso_en[1f:0] */ +#define tdm_lso_en_default 0x0 + +/* tx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_tdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define tdm_dca_en_adr 0x00008480 +/* bitmask for bitfield dca_en */ +#define tdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define tdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define tdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define tdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define tdm_dca_en_default 0x1 + +/* tx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_tdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_adr 0x00008480 +/* bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_default 0x0 + +/* tx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_default 0x0 + +/* tx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_en */ +#define tdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define tdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define tdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define tdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define tdm_descden_default 0x0 + +/* tx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="tdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_width 13 + +/* tx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_default 0x0 + +/* tx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". 
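On the TX side the per-ring enable bit and the 10-bit length field share the register at 0x7c08 + ring * 0x40, so a ring is armed with one write once its base address has been programmed. The length granularity is left to the caller in this sketch, and the function name is illustrative:

#include <linux/io.h>
#include <linux/types.h>

static void example_tx_ring_enable(void __iomem *base, unsigned int ring,
				   u32 len_field)
{
	u32 reg = (len_field << tdm_descdlen_shift) & tdm_descdlen_msk;

	reg |= tdm_descden_msk;		/* desc{d}_en, bit 31 */
	writel(reg, base + tdm_descden_adr(ring));
}
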
+ * port="pif_tdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_adr 0x00007b40 +/* bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_msk 0x00000002 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_mskn 0xfffffffd +/* lower bit position of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_shift 1 +/* width of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_default 0x0 + +/* tx desc{d}_wrb_thresh[6:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* register address for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_msk 0x00007f00 +/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_mskn 0xffff80ff +/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_shift 8 +/* width of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_width 7 +/* default value of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_default 0x0 + +/* tx lso_tcp_flag_first[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]". + * port="pif_thm_lso_tcp_flag_first_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_shift 0 +/* width of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_width 12 +/* default value of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_default 0x0 + +/* tx lso_tcp_flag_last[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]". + * port="pif_thm_lso_tcp_flag_last_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_adr 0x00007824 +/* bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_shift 0 +/* width of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_width 12 +/* default value of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_default 0x0 + +/* tx lso_tcp_flag_mid[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]". 
+ * port="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ + +/* Register address for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_adr 0x00005598 +/* Bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_mskn 0x00000000 +/* Lower bit position of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_shift 0 +/* Width of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_width 32 +/* Default value of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_default 0x0 + +/* RX lro_en[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_en[1F:0]". + * PORT="pif_rpo_lro_en_i[31:0]" + */ + +/* Register address for bitfield lro_en[1F:0] */ +#define rpo_lro_en_adr 0x00005590 +/* Bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_mskn 0x00000000 +/* Lower bit position of bitfield lro_en[1F:0] */ +#define rpo_lro_en_shift 0 +/* Width of bitfield lro_en[1F:0] */ +#define rpo_lro_en_width 32 +/* Default value of bitfield lro_en[1F:0] */ +#define rpo_lro_en_default 0x0 + +/* RX lro_ptopt_en Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ptopt_en". + * PORT="pif_rpo_lro_ptopt_en_i" + */ + +/* Register address for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_adr 0x00005594 +/* Bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_msk 0x00008000 +/* Inverted bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF +/* Lower bit position of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_shift 15 +/* Width of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_width 1 +/* Default value of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_defalt 0x1 + +/* RX lro_q_ses_lmt Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_q_ses_lmt". + * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_msk 0x00003000 +/* Inverted bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF +/* Lower bit position of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_shift 12 +/* Width of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_width 2 +/* Default value of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_default 0x1 + +/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]". + * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_msk 0x00000060 +/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F +/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_shift 5 +/* Width of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_width 2 +/* Default value of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_defalt 0x1 + +/* RX lro_pkt_min[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]". 
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]" + */ + +/* Register address for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_adr 0x00005594 +/* Bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_msk 0x0000001F +/* Inverted bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_mskn 0xFFFFFFE0 +/* Lower bit position of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_shift 0 +/* Width of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_width 5 +/* Default value of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_default 0x8 + +/* Width of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_width 2 +/* Default value of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_default 0x0 + +/* RX lro_tb_div[11:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tb_div[11:0]". + * PORT="pif_rpo_lro_tb_div_i[11:0]" + */ + +/* Register address for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_adr 0x00005620 +/* Bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_msk 0xFFF00000 +/* Inverted bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_mskn 0x000FFFFF +/* Lower bit position of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_shift 20 +/* Width of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_width 12 +/* Default value of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_default 0xC35 + +/* RX lro_ina_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]". + * PORT="pif_rpo_lro_ina_ival_i[9:0]" + */ + +/* Register address for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_adr 0x00005620 +/* Bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_msk 0x000FFC00 +/* Inverted bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_mskn 0xFFF003FF +/* Lower bit position of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_shift 10 +/* Width of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_width 10 +/* Default value of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_default 0xA + +/* RX lro_max_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_max_ival[9:0]". + * PORT="pif_rpo_lro_max_ival_i[9:0]" + */ + +/* Register address for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_adr 0x00005620 +/* Bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_msk 0x000003FF +/* Inverted bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_mskn 0xFFFFFC00 +/* Lower bit position of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_shift 0 +/* Width of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_width 10 +/* Default value of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_default 0x19 + +/* TX dca{D}_cpuid[7:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]". 
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* Register address for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_msk 0x000000FF +/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_mskn 0xFFFFFF00 +/* Lower bit position of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_shift 0 +/* Width of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_width 8 +/* Default value of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_default 0x0 + +/* TX dca{D}_desc_en Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_desc_en". + * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca_desc_en_i[0]" + */ + +/* Register address for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_msk 0x80000000 +/* Inverted bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF +/* Lower bit position of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_shift 31 +/* Width of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_width 1 +/* Default value of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_default 0x0 + +/* TX desc{D}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_en". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc_en_i[0]" + */ + +/* Register address for bitfield desc{D}_en */ +#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_msk 0x80000000 +/* Inverted bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_mskn 0x7FFFFFFF +/* Lower bit position of bitfield desc{D}_en */ +#define tdm_desc_den_shift 31 +/* Width of bitfield desc{D}_en */ +#define tdm_desc_den_width 1 +/* Default value of bitfield desc{D}_en */ +#define tdm_desc_den_default 0x0 + +/* TX desc{D}_hd[C:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="tdm_pif_desc0_hd_o[12:0]" + */ + +/* Register address for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_msk 0x00001FFF +/* Inverted bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_mskn 0xFFFFE000 +/* Lower bit position of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_shift 0 +/* Width of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_width 13 + +/* TX desc{D}_len[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_len[9:0]". 
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_len_i[9:0]" + */ + +/* Register address for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_msk 0x00001FF8 +/* Inverted bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_mskn 0xFFFFE007 +/* Lower bit position of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_shift 3 +/* Width of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_width 10 +/* Default value of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_default 0x0 + +/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* Register address for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_adr(descriptor) \ + (0x00007C18 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_msk 0x00007F00 +/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF +/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_shift 8 +/* Width of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_width 7 +/* Default value of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_default 0x0 + +/* TX tdm_int_mod_en Bitfield Definitions + * Preprocessor definitions for the bitfield "tdm_int_mod_en". + * PORT="pif_tdm_int_mod_en_i" + */ + +/* Register address for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_adr 0x00007B40 +/* Bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_msk 0x00000010 +/* Inverted bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_mskn 0xFFFFFFEF +/* Lower bit position of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_shift 4 +/* Width of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_width 1 +/* Default value of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_default 0x0 + +/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]". + * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ +/* register address for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_msk 0x0fff0000 +/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_mskn 0xf000ffff +/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_shift 16 +/* width of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_width 12 +/* default value of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_default 0x0 + +/* tx tx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "tx_buf_en". 
+ * port="pif_tpb_tx_buf_en_i" + */ + +/* register address for bitfield tx_buf_en */ +#define tpb_tx_buf_en_adr 0x00007900 +/* bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield tx_buf_en */ +#define tpb_tx_buf_en_shift 0 +/* width of bitfield tx_buf_en */ +#define tpb_tx_buf_en_width 1 +/* default value of bitfield tx_buf_en */ +#define tpb_tx_buf_en_default 0x0 + +/* tx tx{b}_hi_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_hi_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_msk 0x1fff0000 +/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_mskn 0xe000ffff +/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_shift 16 +/* width of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_width 13 +/* default value of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_default 0x0 + +/* tx tx{b}_lo_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_lo_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_msk 0x00001fff +/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_mskn 0xffffe000 +/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_shift 0 +/* width of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_width 13 +/* default value of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_default 0x0 + +/* tx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". + * port="pif_tpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_adr 0x00007000 +/* bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_default 0x0 + +/* tx tx{b}_buf_size[7:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_buf_size_i[7:0]" + */ + +/* register address for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_msk 0x000000ff +/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_mskn 0xffffff00 +/* lower bit position of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_shift 0 +/* width of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_width 8 +/* default value of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_default 0x0 + +/* tx tx_scp_ins_en bitfield definitions + * preprocessor definitions for the bitfield "tx_scp_ins_en". + * port="pif_tpb_scp_ins_en_i" + */ + +/* register address for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_adr 0x00007900 +/* bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_msk 0x00000004 +/* inverted bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_mskn 0xfffffffb +/* lower bit position of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_shift 2 +/* width of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_width 1 +/* default value of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_default 0x0 + +/* tx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_tpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_adr 0x00007800 +/* bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_default 0x0 + +/* tx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_tpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define tpol4chk_en_adr 0x00007800 +/* bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define tpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define tpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define tpol4chk_en_default 0x0 + +/* tx pkt_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "pkt_sys_loopback". + * port="pif_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_adr 0x00007000 +/* bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_msk 0x00000080 +/* inverted bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_mskn 0xffffff7f +/* lower bit position of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_shift 7 +/* width of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_width 1 +/* default value of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_default 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". 
+ * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_adr 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_shift 0 +/* width of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_width 1 +/* default value of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_default 0x0 + +/* tx desc_rate_ta_rst bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_ta_rst". + * port="pif_tps_desc_rate_ta_rst_i" + */ + +/* register address for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_adr 0x00007310 +/* bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_msk 0x80000000 +/* inverted bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_mskn 0x7fffffff +/* lower bit position of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_shift 31 +/* width of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_width 1 +/* default value of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_default 0x0 + +/* tx desc_rate_limit[a:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_limit[a:0]". + * port="pif_tps_desc_rate_lim_i[10:0]" + */ + +/* register address for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_adr 0x00007310 +/* bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_msk 0x000007ff +/* inverted bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_mskn 0xfffff800 +/* lower bit position of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_shift 0 +/* width of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_width 11 +/* default value of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_default 0x0 + +/* tx desc_tc_arb_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]". + * port="pif_tps_desc_tc_arb_mode_i[1:0]" + */ + +/* register address for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_adr 0x00007200 +/* bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_msk 0x00000003 +/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_mskn 0xfffffffc +/* lower bit position of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_shift 0 +/* width of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_width 2 +/* default value of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_default 0x0 + +/* tx desc_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_shift 16 +/* width of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_width 12 +/* default value of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_default 0x0 + +/* tx desc_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_weight_i[8:0]" + */ + +/* register address for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_shift 0 +/* width of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_width 9 +/* default value of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_default 0x0 + +/* tx desc_vm_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "desc_vm_arb_mode". + * port="pif_tps_desc_vm_arb_mode_i" + */ + +/* register address for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_adr 0x00007300 +/* bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_shift 0 +/* width of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_width 1 +/* default value of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_default 0x0 + +/* tx data_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_shift 16 +/* width of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_width 12 +/* default value of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_default 0x0 + +/* tx data_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_weight_i[8:0]" + */ + +/* register address for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_shift 0 +/* width of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_width 9 +/* default value of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_default 0x0 + +/* tx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_tx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_adr 0x00007000 +/* bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_default 0x1 + +/* mac_phy register access busy bitfield definitions + * preprocessor definitions for the bitfield "register access busy". + * port="msm_pif_reg_busy_o" + */ + +/* register address for bitfield register access busy */ +#define msm_reg_access_busy_adr 0x00004400 +/* bitmask for bitfield register access busy */ +#define msm_reg_access_busy_msk 0x00001000 +/* inverted bitmask for bitfield register access busy */ +#define msm_reg_access_busy_mskn 0xffffefff +/* lower bit position of bitfield register access busy */ +#define msm_reg_access_busy_shift 12 +/* width of bitfield register access busy */ +#define msm_reg_access_busy_width 1 + +/* mac_phy msm register address[7:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register address[7:0]". + * port="pif_msm_reg_addr_i[7:0]" + */ + +/* register address for bitfield msm register address[7:0] */ +#define msm_reg_addr_adr 0x00004400 +/* bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_msk 0x000000ff +/* inverted bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_mskn 0xffffff00 +/* lower bit position of bitfield msm register address[7:0] */ +#define msm_reg_addr_shift 0 +/* width of bitfield msm register address[7:0] */ +#define msm_reg_addr_width 8 +/* default value of bitfield msm register address[7:0] */ +#define msm_reg_addr_default 0x0 + +/* mac_phy register read strobe bitfield definitions + * preprocessor definitions for the bitfield "register read strobe". 
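+ *
+ * Together with the busy/address/data fields defined above and below,
+ * this strobe forms an indirect access window; a read is typically
+ * performed roughly as follows (illustrative sequence only, inferred
+ * from these field definitions):
+ *   1. wait for "register access busy" to clear,
+ *   2. program "msm register address[7:0]",
+ *   3. pulse the read strobe,
+ *   4. wait for busy to clear again and read the result from
+ *      "msm register read data[31:0]".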
+ * port="pif_msm_reg_rden_i" + */ + +/* register address for bitfield register read strobe */ +#define msm_reg_rd_strobe_adr 0x00004400 +/* bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_msk 0x00000200 +/* inverted bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_mskn 0xfffffdff +/* lower bit position of bitfield register read strobe */ +#define msm_reg_rd_strobe_shift 9 +/* width of bitfield register read strobe */ +#define msm_reg_rd_strobe_width 1 +/* default value of bitfield register read strobe */ +#define msm_reg_rd_strobe_default 0x0 + +/* mac_phy msm register read data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register read data[31:0]". + * port="msm_pif_reg_rd_data_o[31:0]" + */ + +/* register address for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_adr 0x00004408 +/* bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_mskn 0x00000000 +/* lower bit position of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_shift 0 +/* width of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_width 32 + +/* mac_phy msm register write data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register write data[31:0]". + * port="pif_msm_reg_wr_data_i[31:0]" + */ + +/* register address for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_adr 0x00004404 +/* bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_mskn 0x00000000 +/* lower bit position of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_shift 0 +/* width of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_width 32 +/* default value of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_default 0x0 + +/* mac_phy register write strobe bitfield definitions + * preprocessor definitions for the bitfield "register write strobe". + * port="pif_msm_reg_wren_i" + */ + +/* register address for bitfield register write strobe */ +#define msm_reg_wr_strobe_adr 0x00004400 +/* bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_msk 0x00000100 +/* inverted bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_mskn 0xfffffeff +/* lower bit position of bitfield register write strobe */ +#define msm_reg_wr_strobe_shift 8 +/* width of bitfield register write strobe */ +#define msm_reg_wr_strobe_width 1 +/* default value of bitfield register write strobe */ +#define msm_reg_wr_strobe_default 0x0 + +/* mif soft reset bitfield definitions + * preprocessor definitions for the bitfield "soft reset". + * port="pif_glb_res_i" + */ + +/* register address for bitfield soft reset */ +#define glb_soft_res_adr 0x00000000 +/* bitmask for bitfield soft reset */ +#define glb_soft_res_msk 0x00008000 +/* inverted bitmask for bitfield soft reset */ +#define glb_soft_res_mskn 0xffff7fff +/* lower bit position of bitfield soft reset */ +#define glb_soft_res_shift 15 +/* width of bitfield soft reset */ +#define glb_soft_res_width 1 +/* default value of bitfield soft reset */ +#define glb_soft_res_default 0x0 + +/* mif register reset disable bitfield definitions + * preprocessor definitions for the bitfield "register reset disable". 
+ * port="pif_glb_reg_res_dsbl_i" + */ + +/* register address for bitfield register reset disable */ +#define glb_reg_res_dis_adr 0x00000000 +/* bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_msk 0x00004000 +/* inverted bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_mskn 0xffffbfff +/* lower bit position of bitfield register reset disable */ +#define glb_reg_res_dis_shift 14 +/* width of bitfield register reset disable */ +#define glb_reg_res_dis_width 1 +/* default value of bitfield register reset disable */ +#define glb_reg_res_dis_default 0x1 + +/* tx dma debug control definitions */ +#define tx_dma_debug_ctl_adr 0x00008920u + +/* tx dma descriptor base address msw definitions */ +#define tx_dma_desc_base_addrmsw_adr(descriptor) \ + (0x00007c04u + (descriptor) * 0x40) + +/* tx interrupt moderation control register definitions + * Preprocessor definitions for TX Interrupt Moderation Control Register + * Base Address: 0x00008980 + * Parameter: queue {Q} | stride size 0x4 | range [0, 31] + */ + +#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4) + +/* pcie reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_pci_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_adr 0x00001000 +/* bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_default 0x1 + +/* global microprocessor scratch pad definitions */ +#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) + +#endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c new file mode 100644 index 000000000000..8d6d8f5804da --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -0,0 +1,570 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware + * abstraction layer. 
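+ * Covers the firmware mailbox dword download/upload helpers (guarded by
+ * the HW_ATL_FW_SM_RAM semaphore), the RPC call/wait handshake, MPI
+ * speed/state control, link status decoding, permanent MAC address
+ * retrieval, chip feature detection and hardware statistics readout.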
+ */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_pci_func.h" +#include "../aq_ring.h" +#include "../aq_vec.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" + +#include <linux/random.h> + +#define HW_ATL_UCP_0X370_REG 0x0370U + +#define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_MPI_STATE_ADR 0x036CU + +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_MPI_SPEED_SHIFT 16U + +static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt) +{ + int err = 0; + + AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self, + HW_ATL_FW_SM_RAM) == 1U, + 1U, 10000U); + + if (err < 0) { + bool is_locked; + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x00000200U, 0x00008000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + + *(p++) = aq_hw_read_reg(self, 0x0000020CU); + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, + u32 cnt) +{ + int err = 0; + bool is_locked; + + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x0000020CU, *(p++)); + aq_hw_write_reg(self, 0x00000200U, 0xC000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) +{ + int err = 0; + const u32 dw_major_mask = 0xff000000U; + const u32 dw_minor_mask = 0x00ffffffU; + + err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; + if (err < 0) + goto err_exit; + err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? 
+ -EOPNOTSUPP : 0; +err_exit: + return err; +} + +static int hw_atl_utils_init_ucp(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + if (!aq_hw_read_reg(self, 0x370U)) { + unsigned int rnd = 0U; + unsigned int ucp_0x370 = 0U; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr = + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + + err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, + aq_hw_read_reg(self, 0x18U)); + return err; +} + +#define HW_ATL_RPC_CONTROL_ADR 0x0338U +#define HW_ATL_RPC_STATE_ADR 0x033CU + +struct aq_hw_atl_utils_fw_rpc_tid_s { + union { + u32 val; + struct { + u16 tid; + u16 len; + }; + }; +}; + +#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) + +static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + + if (!IS_CHIP_FEATURE(MIPS)) { + err = -1; + goto err_exit; + } + err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *)&PHAL_ATLANTIC->rpc, + (rpc_size + sizeof(u32) - + sizeof(u8)) / sizeof(u32)); + if (err < 0) + goto err_exit; + + sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid); + sw.len = (u16)rpc_size; + aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + struct aq_hw_atl_utils_fw_rpc_tid_s fw; + + do { + sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); + + PHAL_ATLANTIC->rpc_tid = sw.tid; + + AQ_HW_WAIT_FOR(sw.tid == + (fw.val = + aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), + fw.tid), 1000U, 100U); + if (err < 0) + goto err_exit; + + if (fw.len == 0xFFFFU) { + err = hw_atl_utils_fw_rpc_call(self, sw.len); + if (err < 0) + goto err_exit; + } + } while (sw.tid != fw.tid || 0xFFFFU == fw.len); + if (err < 0) + goto err_exit; + + if (rpc) { + if (fw.len) { + err = + hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *) + &PHAL_ATLANTIC->rpc, + (fw.len + sizeof(u32) - + sizeof(u8)) / + sizeof(u32)); + if (err < 0) + goto err_exit; + } + + *rpc = &PHAL_ATLANTIC->rpc; + } + +err_exit: + return err; +} + +static int hw_atl_utils_mpi_create(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + err = hw_atl_utils_init_ucp(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + err = hw_atl_utils_fw_rpc_init(self); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox) +{ + int err = 0; + + err = hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); + if (err < 0) + goto err_exit; + + if (pmbox != &PHAL_ATLANTIC->mbox) + memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); + + if (IS_CHIP_FEATURE(REVISION_A0)) { + unsigned int mtu = self->aq_nic_cfg ? 
+ self->aq_nic_cfg->mtu : 1514U; + pmbox->stats.ubrc = pmbox->stats.uprc * mtu; + pmbox->stats.ubtc = pmbox->stats.uptc * mtu; + pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc); + } else { + pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self); + } + +err_exit:; +} + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state) +{ + u32 ucp_0x368 = 0; + + ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state; + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368); + + return 0; +} + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, u32 speed) +{ + int err = 0; + u32 transaction_id = 0; + + if (state == MPI_RESET) { + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + transaction_id = PHAL_ATLANTIC->mbox.transaction_id; + + AQ_HW_WAIT_FOR(transaction_id != + (hw_atl_utils_mpi_read_stats + (self, &PHAL_ATLANTIC->mbox), + PHAL_ATLANTIC->mbox.transaction_id), + 1000U, 100U); + if (err < 0) + goto err_exit; + } + + err = hw_atl_utils_mpi_set_speed(self, speed, state); + +err_exit:; +} + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status) +{ + u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); + u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; + + if (!link_speed_mask) { + link_status->mbps = 0U; + } else { + switch (link_speed_mask) { + case HAL_ATLANTIC_RATE_10G: + link_status->mbps = 10000U; + break; + + case HAL_ATLANTIC_RATE_5G: + case HAL_ATLANTIC_RATE_5GSR: + link_status->mbps = 5000U; + break; + + case HAL_ATLANTIC_RATE_2GS: + link_status->mbps = 2500U; + break; + + case HAL_ATLANTIC_RATE_1G: + link_status->mbps = 1000U; + break; + + case HAL_ATLANTIC_RATE_100M: + link_status->mbps = 100U; + break; + + default: + link_status->mbps = 0U; + break; + } + } + + return 0; +} + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac) +{ + int err = 0; + u32 h = 0U; + u32 l = 0U; + u32 mac_addr[2]; + + self->mmio = aq_pci_func_get_mmio(self->aq_pci_func); + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + err = hw_atl_utils_mpi_create(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { + unsigned int rnd = 0; + unsigned int ucp_0x370 = 0; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + err = hw_atl_utils_fw_downld_dwords(self, + aq_hw_read_reg(self, 0x00000374U) + + (40U * 4U), + mac_addr, + AQ_DIMOF(mac_addr)); + if (err < 0) { + mac_addr[0] = 0U; + mac_addr[1] = 0U; + err = 0; + } else { + mac_addr[0] = __swab32(mac_addr[0]); + mac_addr[1] = __swab32(mac_addr[1]); + } + + ether_addr_copy(mac, (u8 *)mac_addr); + + if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { + /* chip revision */ + l = 0xE3000000U + | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + } + +err_exit: + return err; +} + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) +{ + unsigned int ret = 0U; + + switch (mbps) { + case 100U: + ret = 5U; + break; + + case 1000U: + ret = 4U; + break; + + case 2500U: + ret = 3U; + break; + + 
case 5000U: + ret = 1U; + break; + + case 10000U: + ret = 0U; + break; + + default: + break; + } + return ret; +} + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) +{ + u32 chip_features = 0U; + u32 val = reg_glb_mif_id_get(self); + u32 mif_rev = val & 0xFFU; + + if ((3U & mif_rev) == 1U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS; + } else if ((3U & mif_rev) == 2U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS | + HAL_ATLANTIC_UTILS_CHIP_TPO2 | + HAL_ATLANTIC_UTILS_CHIP_RPF2; + } + + *p = chip_features; +} + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self) +{ + hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); + return 0; +} + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state) +{ + hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); + return 0; +} + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, unsigned int *p_count) +{ + struct hw_atl_stats_s *stats = NULL; + int i = 0; + + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + stats = &PHAL_ATLANTIC->mbox.stats; + + data[i] = stats->uprc + stats->mprc + stats->bprc; + data[++i] = stats->uprc; + data[++i] = stats->mprc; + data[++i] = stats->bprc; + data[++i] = stats->erpt; + data[++i] = stats->uptc + stats->mptc + stats->bptc; + data[++i] = stats->uptc; + data[++i] = stats->mptc; + data[++i] = stats->bptc; + data[++i] = stats->ubrc; + data[++i] = stats->ubtc; + data[++i] = stats->mbrc; + data[++i] = stats->mbtc; + data[++i] = stats->bbrc; + data[++i] = stats->bbtc; + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; + data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); + data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); + data[++i] = stats->dpc; + + if (p_count) + *p_count = ++i; + + return 0; +} + +static const u32 hw_atl_utils_hw_mac_regs[] = { + 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U, + 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U, + 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U, + 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U, + 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U, + 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U, + 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U, + 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U, + 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U, + 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U, + 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U, + 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U, + 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U, + 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U, + 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U, + 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U, + 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU, + 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU, + 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U, + 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U, + 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U, + 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U, +}; + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff) +{ + unsigned int i = 0U; + + for (i = 0; i < aq_hw_caps->mac_regs_count; i++) + regs_buff[i] = 
aq_hw_read_reg(self, + hw_atl_utils_hw_mac_regs[i]); + return 0; +} + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) +{ + *fw_version = aq_hw_read_reg(self, 0x18U); + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h new file mode 100644 index 000000000000..b8e3d88f0879 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -0,0 +1,210 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware + * abstraction layer. + */ + +#ifndef HW_ATL_UTILS_H +#define HW_ATL_UTILS_H + +#include "../aq_common.h" + +#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); } + +struct __packed hw_atl_stats_s { + u32 uprc; + u32 mprc; + u32 bprc; + u32 erpt; + u32 uptc; + u32 mptc; + u32 bptc; + u32 erpr; + u32 mbtc; + u32 bbtc; + u32 mbrc; + u32 bbrc; + u32 ubrc; + u32 ubtc; + u32 dpc; +}; + +union __packed ip_addr { + struct { + u8 addr[16]; + } v6; + struct { + u8 padding[12]; + u8 addr[4]; + } v4; +}; + +struct __packed hw_aq_atl_utils_fw_rpc { + u32 msg_id; + + union { + struct { + u32 pong; + } msg_ping; + + struct { + u8 mac_addr[6]; + u32 ip_addr_cnt; + + struct { + union ip_addr addr; + union ip_addr mask; + } ip[1]; + } msg_arp; + + struct { + u32 len; + u8 packet[1514U]; + } msg_inject; + + struct { + u32 priority; + u32 wol_packet_type; + u16 friendly_name_len; + u16 friendly_name[65]; + u32 pattern_id; + u32 next_wol_pattern_offset; + + union { + struct { + u32 flags; + u8 ipv4_source_address[4]; + u8 ipv4_dest_address[4]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv4_tcp_syn_parameters; + + struct { + u32 flags; + u8 ipv6_source_address[16]; + u8 ipv6_dest_address[16]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv6_tcp_syn_parameters; + + struct { + u32 flags; + } eapol_request_id_message_parameters; + + struct { + u32 flags; + u32 mask_offset; + u32 mask_size; + u32 pattern_offset; + u32 pattern_size; + } wol_bit_map_pattern; + } wol_pattern; + } msg_wol; + + struct { + u32 is_wake_on_link_down; + u32 is_wake_on_link_up; + } msg_wolink; + }; +}; + +struct __packed hw_aq_atl_utils_mbox { + u32 version; + u32 transaction_id; + int error; + struct hw_atl_stats_s stats; +}; + +struct __packed hw_atl_s { + struct aq_hw_s base; + struct hw_aq_atl_utils_mbox mbox; + u64 speed; + u32 itr_tx; + u32 itr_rx; + unsigned int chip_features; + u32 fw_ver_actual; + atomic_t dpc; + u32 mbox_addr; + u32 rpc_addr; + u32 rpc_tid; + struct hw_aq_atl_utils_fw_rpc rpc; +}; + +#define SELF ((struct hw_atl_s *)self) + +#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self))) + +#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U +#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U +#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U +#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U + +#define IS_CHIP_FEATURE(_F_) 
(HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ + PHAL_ATLANTIC->chip_features) + +enum hal_atl_utils_fw_state_e { + MPI_DEINIT = 0, + MPI_RESET = 1, + MPI_INIT = 2, + MPI_POWER = 4, +}; + +#define HAL_ATLANTIC_RATE_10G BIT(0) +#define HAL_ATLANTIC_RATE_5G BIT(1) +#define HAL_ATLANTIC_RATE_5GSR BIT(2) +#define HAL_ATLANTIC_RATE_2GS BIT(3) +#define HAL_ATLANTIC_RATE_1G BIT(4) +#define HAL_ATLANTIC_RATE_100M BIT(5) +#define HAL_ATLANTIC_RATE_INVALID BIT(6) + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox); + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, + u32 speed); + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state); + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status); + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac); + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps); + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff); + +int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, + struct ethtool_cmd *cmd); + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state); + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self); + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, + unsigned int *p_count); + +#endif /* HW_ATL_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h new file mode 100644 index 000000000000..0de858d215c2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -0,0 +1,18 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#ifndef VER_H +#define VER_H + +#define NIC_MAJOR_DRIVER_VERSION 1 +#define NIC_MINOR_DRIVER_VERSION 5 +#define NIC_BUILD_DRIVER_VERSION 345 +#define NIC_REVISION_DRIVER_VERSION 0 + +#endif /* VER_H */ diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index abc9f2a59054..23873395f100 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) work_done = arc_emac_rx(ndev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); } diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 7dcc907a449d..6a27c2662675 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -311,7 +311,7 @@ static int alx_poll(struct napi_struct *napi, int budget) if (!tx_complete || work == budget) return budget; - napi_complete(&np->napi); + napi_complete_done(&np->napi, work); /* enable interrupt */ if (alx->flags & ALX_FLAG_USING_MSIX) { @@ -1648,8 +1648,8 @@ static void alx_poll_controller(struct net_device *netdev) } #endif -static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *net_stats) +static void alx_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *net_stats) { struct alx_priv *alx = netdev_priv(dev); struct alx_hw_stats *hw_stats = &alx->hw.stats; @@ -1693,8 +1693,6 @@ static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; spin_unlock(&alx->stats_lock); - - return net_stats; } static const struct net_device_ops alx_netdev_ops = { @@ -1823,6 +1821,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 773d3b7d8dd5..7e913d8331c3 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1892,7 +1892,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget) if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); adapter->hw.intr_mask |= ISR_RX_PKT; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); } diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index e96091b652a7..4f7e195af0bc 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1472,7 +1472,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, prrs->vtag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - netif_receive_skb(skb); + napi_gro_receive(&adapter->napi, skb); skip_pkt: /* skip current packet whether it's ok or not. 
*/ @@ -1526,7 +1526,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget) /* If no Tx and not enough Rx work done, exit the polling mode */ if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); imr_data = AT_READ_REG(&adapter->hw, REG_IMR); AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); /* test debug */ diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 7dad8e4b9d2a..022772e1e249 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget) if (work_done >= budget) return work_done; - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable Interrupt */ if (likely(adapter->int_enabled)) atlx_imr_set(adapter, IMR_NORMAL_MASK); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 48707ed76ffc..5b95bb48ce97 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); b44_enable_ints(bp); } @@ -1674,8 +1674,8 @@ static int b44_close(struct net_device *dev) return 0; } -static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *nstat) +static void b44_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *nstat) { struct b44 *bp = netdev_priv(dev); struct b44_hw_stats *hwstat = &bp->hw_stats; @@ -1718,7 +1718,6 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, #endif } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); - return nstat; } static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index c483618b57bd..50d88d3e03b6 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) /* no more packet in rx/tx queue, remove device from poll * queue */ - napi_complete(napi); + napi_complete_done(napi, rx_work_done); /* restore rx/tx interrupt */ enet_dmac_writel(priv, priv->dma_chan_int_mask, @@ -817,7 +817,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev) rx_pause_en = 1; tx_pause_en = 1; } else if (!priv->pause_auto) { - /* pause setting overrided by user */ + /* pause setting overridden by user */ rx_pause_en = priv->pause_rx; tx_pause_en = priv->pause_tx; } else { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 744ed6ddaf37..a68d4889f5db 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv, \ BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); +BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET); BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); -BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET); BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); 
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); +/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact + * same layout, except it has been moved by 4 bytes up, *sigh* + */ +static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) +{ + if (!priv->is_lite) { + return BIT(bit); + } else { + if (bit >= ACB_ALGO) + return BIT(bit + 1); + else + return BIT(bit); + } +} + /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. */ @@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev, priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); reg = tdma_readl(priv, TDMA_CONTROL); if (priv->tsb_en) - reg |= TSB_EN; + reg |= tdma_control_bit(priv, TSB_EN); else - reg &= ~TSB_EN; + reg &= ~tdma_control_bit(priv, TSB_EN); tdma_writel(priv, reg, TDMA_CONTROL); return 0; @@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) priv->msg_enable = enable; } +static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) +{ + switch (type) { + case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_RXCHK: + case BCM_SYSPORT_STAT_RBUF: + case BCM_SYSPORT_STAT_SOFT: + return true; + default: + return false; + } +} + static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) { + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + unsigned int i, j; + switch (string_set) { case ETH_SS_STATS: - return BCM_SYSPORT_STATS_LEN; + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + j++; + } + return j; default: return -EOPNOTSUPP; } @@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) static void bcm_sysport_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - int i; + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + int i, j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcm_sysport_gstrings_stats[i].stat_string, + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + + memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, ETH_GSTRING_LEN); + j++; } break; default: @@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) case BCM_SYSPORT_STAT_MIB_RX: case BCM_SYSPORT_STAT_MIB_TX: case BCM_SYSPORT_STAT_RUNT: + if (priv->is_lite) + continue; + if (s->type != BCM_SYSPORT_STAT_MIB_RX) offset = UMAC_MIB_STAT_OFFSET; val = umac_readl(priv, UMAC_MIB_START + j + offset); @@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct bcm_sysport_priv *priv = netdev_priv(dev); - int i; + int i, j; if 
(netif_running(dev)) bcm_sysport_update_mib_counters(priv); - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { const struct bcm_sysport_stats *s; char *p; @@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev, else p = (char *)priv; p += s->stat_offset; - data[i] = *(unsigned long *)p; + data[j] = *(unsigned long *)p; + j++; } } @@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, u16 len, status; struct bcm_rsb *rsb; - /* Determine how much we should process since last call */ - p_index = rdma_readl(priv, RDMA_PROD_INDEX); + /* Determine how much we should process since last call, SYSTEMPORT Lite + * groups the producer and consumer indexes into the same 32-bit + * which we access using RDMA_CONS_INDEX + */ + if (!priv->is_lite) + p_index = rdma_readl(priv, RDMA_PROD_INDEX); + else + p_index = rdma_readl(priv, RDMA_CONS_INDEX); p_index &= RDMA_PROD_INDEX_MASK; if (p_index < priv->rx_c_index) @@ -791,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) if (work_done == 0) { napi_complete(napi); /* re-enable TX interrupt */ - intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + if (!ring->priv->is_lite) + intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + else + intrl2_0_mask_clear(ring->priv, BIT(ring->index + + INTRL2_0_TDMA_MBDONE_SHIFT)); return 0; } @@ -817,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) priv->rx_c_index += work_done; priv->rx_c_index &= RDMA_CONS_INDEX_MASK; - rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + + /* SYSTEMPORT Lite groups the producer/consumer index, producer is + * maintained by HW, but writes to it will be ignore while RDMA + * is active + */ + if (!priv->is_lite) + rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + else + rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX); if (work_done < budget) { napi_complete_done(napi, work_done); @@ -848,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_tx_ring *txr; + unsigned int ring, ring_bit; priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); @@ -877,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) bcm_sysport_resume_from_wol(priv); } + if (!priv->is_lite) + goto out; + + for (ring = 0; ring < dev->num_tx_queues; ring++) { + ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT); + if (!(priv->irq0_stat & ring_bit)) + continue; + + txr = &priv->tx_rings[ring]; + + if (likely(napi_schedule_prep(&txr->napi))) { + intrl2_0_mask_set(priv, ring_bit); + __napi_schedule(&txr->napi); + } + } +out: return IRQ_HANDLED; } @@ -930,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev) bcm_sysport_rx_isr(priv->irq0, priv); enable_irq(priv->irq0); - disable_irq(priv->irq1); - bcm_sysport_tx_isr(priv->irq1, priv); - enable_irq(priv->irq1); + if (!priv->is_lite) { + disable_irq(priv->irq1); + bcm_sysport_tx_isr(priv->irq1, priv); + enable_irq(priv->irq1); + } } #endif @@ -1129,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) priv->old_duplex = phydev->duplex; } + if (priv->is_lite) + goto out; + switch (phydev->speed) { case SPEED_2500: cmd_bits = CMD_SPEED_2500; @@ -1169,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) reg |= cmd_bits; umac_writel(priv, reg, 
UMAC_CMD); } - - phy_print_status(phydev); +out: + if (changed) + phy_print_status(phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, @@ -1315,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv, reg = tdma_readl(priv, TDMA_CONTROL); if (enable) - reg |= TDMA_EN; + reg |= tdma_control_bit(priv, TDMA_EN); else - reg &= ~TDMA_EN; + reg &= ~tdma_control_bit(priv, TDMA_EN); tdma_writel(priv, reg, TDMA_CONTROL); /* Poll for TMDA disabling completion */ @@ -1342,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) int i; /* Initialize SW view of the RX ring */ - priv->num_rx_bds = NUM_RX_DESC; + priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; priv->rx_c_index = 0; priv->rx_read_ptr = 0; @@ -1379,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) rdma_writel(priv, 0, RDMA_START_ADDR_HI); rdma_writel(priv, 0, RDMA_START_ADDR_LO); rdma_writel(priv, 0, RDMA_END_ADDR_HI); - rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO); + rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); rdma_writel(priv, 1, RDMA_MBDONE_INTR); @@ -1421,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); if (dev->flags & IFF_PROMISC) reg |= CMD_PROMISC; @@ -1438,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv, { u32 reg; - reg = umac_readl(priv, UMAC_CMD); - if (enable) - reg |= mask; - else - reg &= ~mask; - umac_writel(priv, reg, UMAC_CMD); + if (!priv->is_lite) { + reg = umac_readl(priv, UMAC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + umac_writel(priv, reg, UMAC_CMD); + } else { + reg = gib_readl(priv, GIB_CONTROL); + if (enable) + reg |= mask; + else + reg &= ~mask; + gib_writel(priv, reg, GIB_CONTROL); + } /* UniMAC stops on a packet boundary, wait for a full-sized packet * to be processed (1 msec). 
@@ -1456,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) { u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); reg |= CMD_SW_RESET; umac_writel(priv, reg, UMAC_CMD); @@ -1468,9 +1589,17 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) static void umac_set_hw_addr(struct bcm_sysport_priv *priv, unsigned char *addr) { - umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | - (addr[2] << 8) | addr[3], UMAC_MAC0); - umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); + u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | + addr[3]; + u32 mac1 = (addr[4] << 8) | addr[5]; + + if (!priv->is_lite) { + umac_writel(priv, mac0, UMAC_MAC0); + umac_writel(priv, mac1, UMAC_MAC1); + } else { + gib_writel(priv, mac0, GIB_MAC0); + gib_writel(priv, mac1, GIB_MAC1); + } } static void topctrl_flush(struct bcm_sysport_priv *priv) @@ -1515,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev) phy_start(dev->phydev); - /* Enable TX interrupts for the 32 TXQs */ - intrl2_1_mask_clear(priv, 0xffffffff); + /* Enable TX interrupts for the TXQs */ + if (!priv->is_lite) + intrl2_1_mask_clear(priv, 0xffffffff); + else + intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); /* Last call before we start the real business */ netif_tx_start_all_queues(dev); @@ -1528,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv) reg = rbuf_readl(priv, RBUF_CONTROL); reg |= RBUF_4B_ALGN | RBUF_RSB_EN; + /* Set a correct RSB format on SYSTEMPORT Lite */ + if (priv->is_lite) { + reg &= ~RBUF_RSB_SWAP1; + reg |= RBUF_RSB_SWAP0; + } rbuf_writel(priv, reg, RBUF_CONTROL); } +static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv) +{ + intrl2_0_mask_set(priv, 0xffffffff); + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + if (!priv->is_lite) { + intrl2_1_mask_set(priv, 0xffffffff); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + } +} + +static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) +{ + u32 __maybe_unused reg; + + /* Include Broadcom tag in pad extension */ + if (netdev_uses_dsa(priv->netdev)) { + reg = gib_readl(priv, GIB_CONTROL); + reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); + reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; + gib_writel(priv, reg, GIB_CONTROL); + } +} + static int bcm_sysport_open(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -1551,13 +1711,20 @@ static int bcm_sysport_open(struct net_device *dev) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); /* Read CRC forward */ - priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + if (!priv->is_lite) + priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + else + priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP); phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); @@ -1572,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev) priv->old_pause = -1; /* mask all interrupts and request them */ - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_1_writel(priv, 
0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + bcm_sysport_mask_all_intrs(priv); ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); if (ret) { @@ -1585,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev) goto out_phy_disconnect; } - ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev); - if (ret) { - netdev_err(dev, "failed to request TX interrupt\n"); - goto out_free_irq0; + if (!priv->is_lite) { + ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, + dev->name, dev); + if (ret) { + netdev_err(dev, "failed to request TX interrupt\n"); + goto out_free_irq0; + } } /* Initialize both hardware and software ring */ @@ -1635,7 +1800,8 @@ out_free_rx_ring: out_free_tx_ring: for (i = 0; i < dev->num_tx_queues; i++) bcm_sysport_fini_tx_ring(priv, i); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); out_free_irq0: free_irq(priv->irq0, dev); out_phy_disconnect: @@ -1653,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) phy_stop(dev->phydev); /* mask all interrupts */ - intrl2_0_mask_set(priv, 0xffffffff); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_mask_set(priv, 0xffffffff); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + bcm_sysport_mask_all_intrs(priv); } static int bcm_sysport_stop(struct net_device *dev) @@ -1694,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev) bcm_sysport_fini_rx_ring(priv); free_irq(priv->irq0, dev); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); /* Disconnect from PHY */ phy_disconnect(dev->phydev); @@ -1733,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #define REV_FMT "v%2x.%02x" +static const struct bcm_sysport_hw_params bcm_sysport_params[] = { + [SYSTEMPORT] = { + .is_lite = false, + .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, + }, + [SYSTEMPORT_LITE] = { + .is_lite = true, + .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, + }, +}; + +static const struct of_device_id bcm_sysport_of_match[] = { + { .compatible = "brcm,systemportlite-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, + { .compatible = "brcm,systemport-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { .compatible = "brcm,systemport", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); + static int bcm_sysport_probe(struct platform_device *pdev) { + const struct bcm_sysport_hw_params *params; + const struct of_device_id *of_id = NULL; struct bcm_sysport_priv *priv; struct device_node *dn; struct net_device *dev; @@ -1745,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev) dn = pdev->dev.of_node; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + of_id = of_match_node(bcm_sysport_of_match, dn); + if (!of_id || !of_id->data) + return -EINVAL; + + /* Fairly quickly we need to know the type of adapter we have */ + params = of_id->data; /* Read the Transmit/Receive Queue properties */ if (of_property_read_u32(dn, "systemport,num-txq", &txq)) @@ -1752,6 +1946,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) rxq = 1; + /* Sanity check the number of transmit queues */ + if (!txq || txq > TDMA_NUM_RINGS) + return -EINVAL; + dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); if (!dev) return -ENOMEM; @@ -1759,10 +1957,21 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* 
Initialize private members */ priv = netdev_priv(dev); + /* Allocate number of TX rings */ + priv->tx_rings = devm_kcalloc(&pdev->dev, txq, + sizeof(struct bcm_sysport_tx_ring), + GFP_KERNEL); + if (!priv->tx_rings) + return -ENOMEM; + + priv->is_lite = params->is_lite; + priv->num_rx_desc_words = params->num_rx_desc_words; + priv->irq0 = platform_get_irq(pdev, 0); - priv->irq1 = platform_get_irq(pdev, 1); + if (!priv->is_lite) + priv->irq1 = platform_get_irq(pdev, 1); priv->wol_irq = platform_get_irq(pdev, 2); - if (priv->irq0 <= 0 || priv->irq1 <= 0) { + if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { dev_err(&pdev->dev, "invalid interrupts\n"); ret = -EINVAL; goto err_free_netdev; @@ -1836,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, - "Broadcom SYSTEMPORT" REV_FMT + "Broadcom SYSTEMPORT%s" REV_FMT " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", + priv->is_lite ? " Lite" : "", (priv->rev >> 8) & 0xff, priv->rev & 0xff, priv->base, priv->irq0, priv->irq1, txq, rxq); @@ -2033,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); @@ -2069,13 +2282,6 @@ out_free_tx_rings: static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, bcm_sysport_suspend, bcm_sysport_resume); -static const struct of_device_id bcm_sysport_of_match[] = { - { .compatible = "brcm,systemport-v1.00" }, - { .compatible = "brcm,systemport" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); - static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, .remove = bcm_sysport_remove, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 1c82e3da69a7..863ddd7870b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -127,6 +127,10 @@ struct bcm_rsb { #define INTRL2_0_DESC_ALLOC_ERR (1 << 10) #define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11) +/* SYSTEMPORT Lite groups the TX queues interrupts on instance 0 */ +#define INTRL2_0_TDMA_MBDONE_SHIFT 12 +#define INTRL2_0_TDMA_MBDONE_MASK (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT) + /* RXCHK offset and defines */ #define SYS_PORT_RXCHK_OFFSET 0x300 @@ -176,7 +180,9 @@ struct bcm_rsb { #define RBUF_OK_TO_SEND_MASK 0xff #define RBUF_CRC_REPLACE (1 << 20) #define RBUF_OK_TO_SEND_MODE (1 << 21) -#define RBUF_RSB_SWAP (1 << 22) +/* SYSTEMPORT Lite uses two bits here */ +#define RBUF_RSB_SWAP0 (1 << 22) +#define RBUF_RSB_SWAP1 (1 << 23) #define RBUF_ACPI_EN (1 << 23) #define RBUF_PKT_RDY_THRESH 0x04 @@ -247,6 +253,7 @@ struct bcm_rsb { #define MIB_RUNT_CNT_RST (1 << 1) #define MIB_TX_CNT_RST (1 << 2) +/* These offsets are valid for SYSTEMPORT and SYSTEMPORT Lite */ #define UMAC_MPD_CTRL 0x620 #define MPD_EN (1 << 0) #define MSEQ_LEN_SHIFT 16 @@ -258,6 +265,34 @@ struct bcm_rsb { #define UMAC_MDF_CTRL 0x650 #define UMAC_MDF_ADDR 0x654 +/* Only valid on SYSTEMPORT Lite */ +#define SYS_PORT_GIB_OFFSET 0x1000 + +#define GIB_CONTROL 0x00 +#define GIB_TX_EN (1 << 0) +#define GIB_RX_EN (1 << 1) +#define GIB_TX_FLUSH (1 << 2) +#define GIB_RX_FLUSH (1 << 3) +#define GIB_GTX_CLK_SEL_SHIFT 4 +#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) +#define 
GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_FCS_STRIP (1 << 6) +#define GIB_LCL_LOOP_EN (1 << 7) +#define GIB_LCL_LOOP_TXEN (1 << 8) +#define GIB_RMT_LOOP_EN (1 << 9) +#define GIB_RMT_LOOP_RXEN (1 << 10) +#define GIB_RX_PAUSE_EN (1 << 11) +#define GIB_PREAMBLE_LEN_SHIFT 12 +#define GIB_PREAMBLE_LEN_MASK 0xf +#define GIB_IPG_LEN_SHIFT 16 +#define GIB_IPG_LEN_MASK 0x3f +#define GIB_PAD_EXTENSION_SHIFT 22 +#define GIB_PAD_EXTENSION_MASK 0x3f + +#define GIB_MAC1 0x08 +#define GIB_MAC0 0x0c + /* Receive DMA offset and defines */ #define SYS_PORT_RDMA_OFFSET 0x2000 @@ -409,16 +444,19 @@ struct bcm_rsb { RING_PCP_DEI_VID) #define TDMA_CONTROL 0x600 -#define TDMA_EN (1 << 0) -#define TSB_EN (1 << 1) -#define TSB_SWAP (1 << 2) -#define ACB_ALGO (1 << 3) +#define TDMA_EN 0 +#define TSB_EN 1 +/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit, we + * keep the SYSTEMPORT layout here and adjust with tdma_control_bit() + */ +#define TSB_SWAP 2 +#define ACB_ALGO 3 #define BUF_DATA_OFFSET_SHIFT 4 #define BUF_DATA_OFFSET_MASK 0x3ff -#define VLAN_EN (1 << 14) -#define SW_BRCM_TAG (1 << 15) -#define WNC_KPT_SIZE_UPDATE (1 << 16) -#define SYNC_PKT_SIZE (1 << 17) +#define VLAN_EN 14 +#define SW_BRCM_TAG 15 +#define WNC_KPT_SIZE_UPDATE 16 +#define SYNC_PKT_SIZE 17 #define ACH_TXDONE_DELAY_SHIFT 18 #define ACH_TXDONE_DELAY_MASK 0xff @@ -475,12 +513,12 @@ struct dma_desc { }; /* Number of Receive hardware descriptor words */ -#define NUM_HW_RX_DESC_WORDS 1024 -/* Real number of usable descriptors */ -#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC) +#define SP_NUM_HW_RX_DESC_WORDS 1024 +#define SP_LT_NUM_HW_RX_DESC_WORDS 256 -/* Internal linked-list RAM has up to 1536 entries */ -#define NUM_TX_DESC 1536 +/* Internal linked-list RAM size */ +#define SP_NUM_TX_DESC 1536 +#define SP_LT_NUM_TX_DESC 256 #define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32)) @@ -627,6 +665,16 @@ struct bcm_sysport_cb { DEFINE_DMA_UNMAP_LEN(dma_len); }; +enum bcm_sysport_type { + SYSTEMPORT = 0, + SYSTEMPORT_LITE, +}; + +struct bcm_sysport_hw_params { + bool is_lite; + unsigned int num_rx_desc_words; +}; + /* Software view of the TX ring */ struct bcm_sysport_tx_ring { spinlock_t lock; /* Ring lock for tx reclaim/xmit */ @@ -651,6 +699,8 @@ struct bcm_sysport_priv { u32 irq0_mask; u32 irq1_stat; u32 irq1_mask; + bool is_lite; + unsigned int num_rx_desc_words; struct napi_struct napi ____cacheline_aligned; struct net_device *netdev; struct platform_device *pdev; @@ -659,7 +709,7 @@ struct bcm_sysport_priv { int wol_irq; /* Transmit rings */ - struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; + struct bcm_sysport_tx_ring *tx_rings; /* Receive queue */ void __iomem *rx_bds; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 7c19c8e2bf91..6ce80cbcb48e 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -12,11 +12,6 @@ #include <linux/brcmphy.h> #include "bgmac.h" -struct bcma_mdio { - struct bcma_device *core; - u8 phyaddr; -}; - static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { @@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, * PHY ops **************************************************/ -static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) +static u16 bcma_mdio_phy_read(struct 
bgmac *bgmac, u8 phyaddr, u8 reg) { struct bcma_device *core; u16 phy_access_addr; @@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ -static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, +static int bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) { struct bcma_device *core; @@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, u16 phy_ctl_addr; u32 tmp; - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, tmp |= phyaddr; bcma_write32(core, phy_ctl_addr, tmp); - bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); - if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) dev_warn(&core->dev, "Error setting MDIO int\n"); tmp = BGMAC_PA_START; @@ -132,57 +127,67 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ -static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio) +static void bcma_mdio_phy_init(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo; + struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo; u8 i; + /* For some legacy hardware we do chipset-based PHY initialization here + * without even detecting PHY ID. It's hacky and should be cleaned as + * soon as someone can test it. 
+ */ if (ci->id == BCMA_CHIP_ID_BCM5356) { for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b); - bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b); + bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { - struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc; + struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc; bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } + + /* For all other hw do initialization using PHY subsystem. 
*/ + if (bgmac->net_dev && bgmac->net_dev->phydev) + phy_init_hw(bgmac->net_dev->phydev); } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ static int bcma_mdio_phy_reset(struct mii_bus *bus) { - struct bcma_mdio *bcma_mdio = bus->priv; - u8 phyaddr = bcma_mdio->phyaddr; + struct bgmac *bgmac = bus->priv; + u8 phyaddr = bgmac->phyaddr; - if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS) + if (phyaddr == BGMAC_PHY_NOREGS) return 0; - bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET); + bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET); udelay(100); - if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET) - dev_err(&bcma_mdio->core->dev, "PHY reset failed\n"); - bcma_mdio_phy_init(bcma_mdio); + if (bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET) + dev_err(bgmac->dev, "PHY reset failed\n"); + bcma_mdio_phy_init(bgmac); return 0; } @@ -202,16 +207,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum, return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value); } -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) { - struct bcma_mdio *bcma_mdio; + struct bcma_device *core = bgmac->bcma.core; struct mii_bus *mii_bus; int err; - bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL); - if (!bcma_mdio) - return ERR_PTR(-ENOMEM); - mii_bus = mdiobus_alloc(); if (!mii_bus) { err = -ENOMEM; @@ -221,15 +222,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) mii_bus->name = "bcma_mdio mii bus"; sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num, core->core_unit); - mii_bus->priv = bcma_mdio; + mii_bus->priv = bgmac; mii_bus->read = bcma_mdio_mii_read; mii_bus->write = bcma_mdio_mii_write; mii_bus->reset = bcma_mdio_phy_reset; mii_bus->parent = &core->dev; - mii_bus->phy_mask = ~(1 << phyaddr); - - bcma_mdio->core = core; - bcma_mdio->phyaddr = phyaddr; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); err = mdiobus_register(mii_bus); if (err) { @@ -242,23 +240,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) err_free_bus: mdiobus_free(mii_bus); err: - kfree(bcma_mdio); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_register); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus) { - struct bcma_mdio *bcma_mdio; - if (!mii_bus) return; - bcma_mdio = mii_bus->priv; - mdiobus_unregister(mii_bus); mdiobus_free(mii_bus); - kfree(bcma_mdio); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 4a4ffc0c4c65..d59cfcc4c4d5 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core) u8 *mac; int err; - bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&core->dev); if (!bgmac) return -ENOMEM; bgmac->bcma.core = core; - bgmac->dev = &core->dev; bgmac->dma_dev = core->dma_dev; bgmac->irq = core->irq; @@ -145,7 +144,7 @@ static int bgmac_probe(struct bcma_device *core) goto err; } - ether_addr_copy(bgmac->mac_addr, mac); + ether_addr_copy(bgmac->net_dev->dev_addr, mac); /* On BCM4706 we need common core to access PHY */ if (core->id.id == BCMA_CORE_4706_MAC_GBIT && @@ -178,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core) if (!bgmac_is_bcm4707_family(core) && !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { - mii_bus = 
bcma_mdio_mii_register(core, bgmac->phyaddr); + mii_bus = bcma_mdio_mii_register(bgmac); if (IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); goto err; @@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core) err1: bcma_mdio_mii_unregister(bgmac->mii_bus); err: - kfree(bgmac); bcma_set_drvdata(core, NULL); return err; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6f736c19872f..da1b8b225eb9 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -51,8 +51,7 @@ static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) { - if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK) + if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN) return false; if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) return false; @@ -61,15 +60,25 @@ static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) { - bgmac_idm_write(bgmac, BCMA_IOCTL, - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags)); - bgmac_idm_read(bgmac, BCMA_IOCTL); + u32 val; - bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); - bgmac_idm_read(bgmac, BCMA_RESET_CTL); - udelay(1); + /* The Reset Control register only contains a single bit to show if the + * controller is currently in reset. Do a sanity check here, just in + * case the bootloader happened to leave the device in reset. + */ + val = bgmac_idm_read(bgmac, BCMA_RESET_CTL); + if (val) { + bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); + bgmac_idm_read(bgmac, BCMA_RESET_CTL); + udelay(1); + } - bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); + val = bgmac_idm_read(bgmac, BCMA_IOCTL); + /* Some bits of BCMA_IOCTL set by HW/ATF and should not change */ + val |= flags & ~(BGMAC_AWCACHE | BGMAC_ARCACHE | BGMAC_AWUSER | + BGMAC_ARUSER); + val |= BGMAC_CLK_EN; + bgmac_idm_write(bgmac, BCMA_IOCTL, val); bgmac_idm_read(bgmac, BCMA_IOCTL); udelay(1); } @@ -151,7 +160,7 @@ static int bgmac_probe(struct platform_device *pdev) struct resource *regs; const u8 *mac_addr; - bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&pdev->dev); if (!bgmac) return -ENOMEM; @@ -169,7 +178,7 @@ static int bgmac_probe(struct platform_device *pdev) mac_addr = of_get_mac_address(np); if (mac_addr) - ether_addr_copy(bgmac->mac_addr, mac_addr); + ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr); else dev_warn(&pdev->dev, "MAC address not present in device tree\n"); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 0e066dc6b8cc..fd66fca00e01 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -12,6 +12,8 @@ #include <linux/bcma/bcma.h> #include <linux/etherdevice.h> #include <linux/bcm47xx_nvram.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> #include "bgmac.h" static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask, @@ -1148,7 +1150,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight) return weight; if (handled < weight) { - napi_complete(napi); + napi_complete_done(napi, handled); bgmac_chip_intrs_on(bgmac); } @@ -1221,12 +1223,16 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb, static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) { struct bgmac *bgmac = 
netdev_priv(net_dev); + struct sockaddr *sa = addr; int ret; ret = eth_prepare_mac_addr_change(net_dev, addr); if (ret < 0) return ret; - bgmac_write_mac_address(bgmac, (u8 *)addr); + + ether_addr_copy(net_dev->dev_addr, sa->sa_data); + bgmac_write_mac_address(bgmac, net_dev->dev_addr); + eth_commit_mac_addr_change(net_dev, addr); return 0; } @@ -1446,33 +1452,42 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) } EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct); -int bgmac_enet_probe(struct bgmac *info) +struct bgmac *bgmac_alloc(struct device *dev) { struct net_device *net_dev; struct bgmac *bgmac; - int err; /* Allocation and references */ - net_dev = alloc_etherdev(sizeof(*bgmac)); + net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac)); if (!net_dev) - return -ENOMEM; + return NULL; net_dev->netdev_ops = &bgmac_netdev_ops; net_dev->ethtool_ops = &bgmac_ethtool_ops; + bgmac = netdev_priv(net_dev); - memcpy(bgmac, info, sizeof(*bgmac)); + bgmac->dev = dev; bgmac->net_dev = net_dev; + + return bgmac; +} +EXPORT_SYMBOL_GPL(bgmac_alloc); + +int bgmac_enet_probe(struct bgmac *bgmac) +{ + struct net_device *net_dev = bgmac->net_dev; + int err; + net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); - if (!is_valid_ether_addr(bgmac->mac_addr)) { + if (!is_valid_ether_addr(net_dev->dev_addr)) { dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", - bgmac->mac_addr); - eth_random_addr(bgmac->mac_addr); + net_dev->dev_addr); + eth_hw_addr_random(net_dev); dev_warn(bgmac->dev, "Using random MAC: %pM\n", - bgmac->mac_addr); + net_dev->dev_addr); } - ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr); /* This (reset &) enable is not preset in specs or reference driver but * Broadcom does it in arch PCI code when enabling fake PCI device. @@ -1488,7 +1503,7 @@ int bgmac_enet_probe(struct bgmac *info) err = bgmac_dma_alloc(bgmac); if (err) { dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); - goto err_netdev_free; + goto err_out; } bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; @@ -1521,8 +1536,7 @@ err_phy_disconnect: phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); -err_netdev_free: - free_netdev(net_dev); +err_out: return err; } diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 71f493f2451f..6d1c6ff1ed96 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -213,6 +213,22 @@ /* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */ #define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */ #define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */ +/* The IOCTL values appear to be different in NS, NSP, and NS2, and do not match + * the values directly above + */ +#define BGMAC_CLK_EN BIT(0) +#define BGMAC_RESERVED_0 BIT(1) +#define BGMAC_SOURCE_SYNC_MODE_EN BIT(2) +#define BGMAC_DEST_SYNC_MODE_EN BIT(3) +#define BGMAC_TX_CLK_OUT_INVERT_EN BIT(4) +#define BGMAC_DIRECT_GMII_MODE BIT(5) +#define BGMAC_CLK_250_SEL BIT(6) +#define BGMAC_AWCACHE (0xf << 7) +#define BGMAC_RESERVED_1 (0x1f << 11) +#define BGMAC_ARCACHE (0xf << 16) +#define BGMAC_AWUSER (0x3f << 20) +#define BGMAC_ARUSER (0x3f << 26) +#define BGMAC_RESERVED BIT(31) /* BCMA GMAC core specific IO status (BCMA_IOST) flags */ #define BGMAC_BCMA_IOST_ATTACHED 0x00000800 @@ -474,7 +490,6 @@ struct bgmac { struct device *dev; struct device *dma_dev; - unsigned char mac_addr[ETH_ALEN]; u32 feature_flags; struct net_device *net_dev; @@ -517,12 +532,13 @@ struct bgmac { int (*phy_connect)(struct 
bgmac *bgmac); }; -int bgmac_enet_probe(struct bgmac *info); +struct bgmac *bgmac_alloc(struct device *dev); +int bgmac_enet_probe(struct bgmac *bgmac); void bgmac_enet_remove(struct bgmac *bgmac); void bgmac_adjust_link(struct net_device *net_dev); int bgmac_phy_connect_direct(struct bgmac *bgmac); -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr); +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index d5d1026be4b7..e3af1f3cb61f 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_fast_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx); @@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | @@ -6821,13 +6821,13 @@ bnx2_save_stats(struct bnx2 *bp) (unsigned long) (bp->stats_blk->ctr + \ bp->temp_stats_blk->ctr) -static struct rtnl_link_stats64 * +static void bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); if (bp->stats_blk == NULL) - return net_stats; + return; net_stats->rx_packets = GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + @@ -6891,7 +6891,6 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + GET_32BIT_NET_STATS(stat_FwRxDrop); - return net_stats; } /* All ethtool functions called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 3e199d3e461e..9e8c06130c09 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_alloc_pool *pool = &fp->page_pool; dma_addr_t mapping; - if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) { - - /* put page reference used by the memory pool, since we - * won't be using this page as the mempool anymore. - */ - if (pool->page) - put_page(pool->page); - + if (!pool->page) { pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); if (unlikely(!pool->page)) return -ENOMEM; @@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, return -ENOMEM; } - get_page(pool->page); sw_buf->page = pool->page; sw_buf->offset = pool->offset; @@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, sge->addr_lo = cpu_to_le32(U64_LO(mapping)); pool->offset += SGE_PAGE_SIZE; - + if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) + get_page(pool->page); + else + pool->page = NULL; return 0; } @@ -3229,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) * has been updated when NAPI was scheduled. 
*/ if (IS_FCOE_FP(fp)) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); } else { bnx2x_update_fpsb_idx(fp); /* bnx2x_has_rx_work() reads the status block, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 5f19427c7b27..43423744fdfa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp) return port_type; } -static int bnx2x_get_vf_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int bnx2x_get_vf_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (bp->state == BNX2X_STATE_OPEN) { if (test_bit(BNX2X_LINK_REPORT_FD, &bp->vf_link_vars.link_report_flags)) - cmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; - ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); + cmd->base.speed = bp->vf_link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = PORT_OTHER; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + cmd->base.port = PORT_OTHER; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); int cfg_idx = bnx2x_get_link_cfg_idx(bp); u32 media_type; + u32 supported, advertising, lp_advertising; + + ethtool_convert_link_mode_to_legacy_u32(&lp_advertising, + cmd->link_modes.lp_advertising); /* Dual Media boards present all available port types */ - cmd->supported = bp->port.supported[cfg_idx] | + supported = bp->port.supported[cfg_idx] | (bp->port.supported[cfg_idx ^ 1] & (SUPPORTED_TP | SUPPORTED_FIBRE)); - cmd->advertising = bp->port.advertising[cfg_idx]; + advertising = bp->port.advertising[cfg_idx]; media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type; if (media_type == ETH_PHY_SFP_1G_FIBER) { - cmd->supported &= ~(SUPPORTED_10000baseT_Full); - cmd->advertising &= ~(ADVERTISED_10000baseT_Full); + supported &= ~(SUPPORTED_10000baseT_Full); + advertising &= ~(ADVERTISED_10000baseT_Full); } if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && !(bp->flags & MF_FUNC_DIS)) { - cmd->duplex = bp->link_vars.duplex; + cmd->base.duplex = bp->link_vars.duplex; if 
(IS_MF(bp) && !BP_NOMCP(bp)) - ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); + cmd->base.speed = bnx2x_get_mf_speed(bp); else - ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); + cmd->base.speed = bp->link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = bnx2x_get_port_type(bp); + cmd->base.port = bnx2x_get_port_type(bp); - cmd->phy_address = bp->mdio.prtad; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.phy_address = bp->mdio.prtad; if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) - cmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; else - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; /* Publish LP advertised speeds and FC */ if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { u32 status = bp->link_vars.link_status; - cmd->lp_advertising |= ADVERTISED_Autoneg; + lp_advertising |= ADVERTISED_Autoneg; if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Pause; + lp_advertising |= ADVERTISED_Pause; if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Asym_Pause; + lp_advertising |= ADVERTISED_Asym_Pause; if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Half; + lp_advertising |= ADVERTISED_10baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Full; + lp_advertising |= ADVERTISED_10baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Half; + lp_advertising |= ADVERTISED_100baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Full; + lp_advertising |= ADVERTISED_100baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_1000baseT_Half; + lp_advertising |= ADVERTISED_1000baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseKX_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_2500baseX_Full; + lp_advertising |= ADVERTISED_2500baseX_Full; if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseKR_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; + lp_advertising |= ADVERTISED_20000baseKR2_Full; } - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, 
cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; u32 speed, phy_idx; + u32 supported; + u8 duplex = cmd->base.duplex; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (IS_MF_SD(bp)) return 0; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; + if (duplex == DUPLEX_UNKNOWN) + duplex = DUPLEX_FULL; if (IS_MF_SI(bp)) { u32 part; @@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cfg_idx = bnx2x_get_link_cfg_idx(bp); old_multi_phy_config = bp->link_params.multi_phy_config; - if (cmd->port != bnx2x_get_port_type(bp)) { - switch (cmd->port) { + if (cmd->base.port != bnx2x_get_port_type(bp)) { + switch (cmd->base.port) { case PORT_TP: if (!(bp->port.supported[0] & SUPPORTED_TP || bp->port.supported[1] & SUPPORTED_TP)) { @@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->link_params.multi_phy_config = old_multi_phy_config; DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx); - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 an_supported_speed = bp->port.supported[cfg_idx]; if (bp->link_params.phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) @@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } /* advertise the requested speed and duplex if supported */ - if (cmd->advertising & ~an_supported_speed) { + if (advertising & ~an_supported_speed) { DP(BNX2X_MSG_ETHTOOL, "Advertisement parameters are not supported\n"); return -EINVAL; } bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | - cmd->advertising); - if (cmd->advertising) { + advertising); + if (advertising) { bp->link_params.speed_cap_mask[cfg_idx] = 0; - if (cmd->advertising & ADVERTISED_10baseT_Half) { + if (advertising & ADVERTISED_10baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; } - if (cmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; - if (cmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; - if (cmd->advertising & ADVERTISED_100baseT_Half) { + if (advertising & ADVERTISED_100baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; } - if (cmd->advertising & ADVERTISED_1000baseT_Half) { + if (advertising & ADVERTISED_1000baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; } - if (cmd->advertising & (ADVERTISED_1000baseT_Full | + if (advertising & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseKX_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; - if (cmd->advertising & (ADVERTISED_10000baseT_Full | + if (advertising & (ADVERTISED_10000baseT_Full | ADVERTISED_10000baseKX4_Full | ADVERTISED_10000baseKR_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; - if (cmd->advertising & ADVERTISED_20000baseKR2_Full) + if (advertising & ADVERTISED_20000baseKR2_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_20G; } @@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) /* advertise the requested speed and duplex if supported */ switch (speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_1000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "1G half not supported\n"); return -EINVAL; @@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_2500: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "2.5G half not supported\n"); return -EINVAL; @@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_10000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "10G half not supported\n"); return -EINVAL; @@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } bp->link_params.req_line_speed[cfg_idx] = speed; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = advertising; } @@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev, } static const struct ethtool_ops bnx2x_ethtool_ops = { - .get_settings = bnx2x_get_settings, - .set_settings = bnx2x_set_settings, .get_drvinfo = bnx2x_get_drvinfo, .get_regs_len = bnx2x_get_regs_len, .get_regs = bnx2x_get_regs, @@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, .get_ts_info = bnx2x_get_ts_info, + .get_link_ksettings = bnx2x_get_link_ksettings, + .set_link_ksettings = bnx2x_set_link_ksettings, }; static const struct ethtool_ops bnx2x_vf_ethtool_ops 
= { - .get_settings = bnx2x_get_vf_settings, .get_drvinfo = bnx2x_get_drvinfo, .get_msglevel = bnx2x_get_msglevel, .set_msglevel = bnx2x_set_msglevel, @@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = { .set_rxfh = bnx2x_set_rxfh, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, + .get_link_ksettings = bnx2x_get_vf_link_ksettings, }; void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 05356efdbf93..b209b7f6093e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6957,7 +6957,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) * hence its link is expected to be down * - SECOND_PHY means that first phy should not be able * to link up by itself (using configuration) - * - DEFAULT should be overriden during initialiazation + * - DEFAULT should be overridden during initialization */ DP(NETIF_MSG_LINK, "Invalid link indication" "mpc=0x%x. DISABLING LINK !!!\n", diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 6082ed1b5ea0..a7ca45b251cb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4fcc6a84a087..235733e91c79 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,6 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. 
* * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,15 +34,13 @@ #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/rtc.h> +#include <linux/bpf.h> #include <net/ip.h> #include <net/tcp.h> #include <net/udp.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <net/udp_tunnel.h> -#ifdef CONFIG_NET_RX_BUSY_POLL -#include <net/busy_poll.h> -#endif #include <linux/workqueue.h> #include <linux/prefetch.h> #include <linux/cache.h> @@ -56,6 +55,7 @@ #include "bnxt_sriov.h" #include "bnxt_ethtool.h" #include "bnxt_dcb.h" +#include "bnxt_xdp.h" #define BNXT_TX_TIMEOUT (5 * HZ) @@ -99,6 +99,8 @@ enum board_idx { BCM57407_NPAR, BCM57414_NPAR, BCM57416_NPAR, + BCM57452, + BCM57454, NETXTREME_E_VF, NETXTREME_C_VF, }; @@ -133,6 +135,8 @@ static const struct { { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, { "Broadcom NetXtreme-E Ethernet Virtual Function" }, { "Broadcom NetXtreme-C Ethernet Virtual Function" }, }; @@ -168,6 +172,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, + { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, @@ -213,16 +219,7 @@ static bool bnxt_vf_pciid(enum board_idx idx) #define BNXT_CP_DB_IRQ_DIS(db) \ writel(DB_CP_IRQ_DIS_FLAGS, db) -static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) -{ - /* Tell compiler to fetch tx indices from memory. 
*/ - barrier(); - - return bp->tx_ring_size - - ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); -} - -static const u16 bnxt_lhint_arr[] = { +const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_512_AND_SMALLER, TX_BD_FLAGS_LHINT_512_TO_1023, TX_BD_FLAGS_LHINT_1024_TO_2047, @@ -265,8 +262,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(dev, i); + txr = &bp->tx_ring[bp->tx_ring_map[i]]; prod = txr->tx_prod; free_size = bnxt_tx_avail(bp, txr); @@ -512,8 +509,7 @@ tx_dma_error: static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; - int index = txr - &bp->tx_ring[0]; - struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); u16 cons = txr->tx_cons; struct pci_dev *pdev = bp->pdev; int i; @@ -576,6 +572,25 @@ next_tx_int: } } +static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, + gfp_t gfp) +{ + struct device *dev = &bp->pdev->dev; + struct page *page; + + page = alloc_page(gfp); + if (!page) + return NULL; + + *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir); + if (dma_mapping_error(dev, *mapping)) { + __free_page(page); + return NULL; + } + *mapping += bp->rx_dma_offset; + return page; +} + static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, gfp_t gfp) { @@ -586,8 +601,8 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, if (!data) return NULL; - *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET, - bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset, + bp->rx_buf_use_size, bp->rx_dir); if (dma_mapping_error(&pdev->dev, *mapping)) { kfree(data); @@ -596,29 +611,37 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, return data; } -static inline int bnxt_alloc_rx_data(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr, - u16 prod, gfp_t gfp) +int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) { struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; - u8 *data; dma_addr_t mapping; - data = __bnxt_alloc_rx_data(bp, &mapping, gfp); - if (!data) - return -ENOMEM; + if (BNXT_RX_PAGE_MODE(bp)) { + struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp); - rx_buf->data = data; - dma_unmap_addr_set(rx_buf, mapping, mapping); + if (!page) + return -ENOMEM; - rxbd->rx_bd_haddr = cpu_to_le64(mapping); + rx_buf->data = page; + rx_buf->data_ptr = page_address(page) + bp->rx_offset; + } else { + u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); + if (!data) + return -ENOMEM; + + rx_buf->data = data; + rx_buf->data_ptr = data + bp->rx_offset; + } + rx_buf->mapping = mapping; + + rxbd->rx_bd_haddr = cpu_to_le64(mapping); return 0; } -static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, - u8 *data) +void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) { u16 prod = rxr->rx_prod; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; @@ -628,9 +651,9 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf->data = data; + prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; - dma_unmap_addr_set(prod_rx_buf, mapping, - dma_unmap_addr(cons_rx_buf, 
mapping)); + prod_rx_buf->mapping = cons_rx_buf->mapping; prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; @@ -756,13 +779,60 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, rxr->rx_sw_agg_prod = sw_prod; } +static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 cons, void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + unsigned int payload = offset_and_len >> 16; + unsigned int len = offset_and_len & 0xffff; + struct skb_frag_struct *frag; + struct page *page = data; + u16 prod = rxr->rx_prod; + struct sk_buff *skb; + int off, err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + dma_addr -= bp->rx_dma_offset; + dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir); + + if (unlikely(!payload)) + payload = eth_get_headlen(data_ptr, len); + + skb = napi_alloc_skb(&rxr->bnapi->napi, payload); + if (!skb) { + __free_page(page); + return NULL; + } + + off = (void *)data_ptr - page_address(page); + skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); + memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, + payload + NET_IP_ALIGN); + + frag = &skb_shinfo(skb)->frags[0]; + skb_frag_size_sub(frag, payload); + frag->page_offset += payload; + skb->data_len -= payload; + skb->tail += payload; + + return skb; +} + static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - u16 prod, u8 *data, dma_addr_t dma_addr, - unsigned int len) + void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) { - int err; + u16 prod = rxr->rx_prod; struct sk_buff *skb; + int err; err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); if (unlikely(err)) { @@ -772,14 +842,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, skb = build_skb(data, 0); dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + bp->rx_dir); if (!skb) { kfree(data); return NULL; } - skb_reserve(skb, BNXT_RX_OFFSET); - skb_put(skb, len); + skb_reserve(skb, bp->rx_offset); + skb_put(skb, offset_and_len & 0xffff); return skb; } @@ -815,7 +885,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, * a sw_prod index that equals the cons index, so we * need to clear the cons entry now. 
*/ - mapping = dma_unmap_addr(cons_rx_buf, mapping); + mapping = cons_rx_buf->mapping; page = cons_rx_buf->page; cons_rx_buf->page = NULL; @@ -878,14 +948,14 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, if (!skb) return NULL; - dma_sync_single_for_cpu(&pdev->dev, mapping, - bp->rx_copy_thresh, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, + bp->rx_dir); - memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET); + memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, + len + NET_IP_ALIGN); - dma_sync_single_for_device(&pdev->dev, mapping, - bp->rx_copy_thresh, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, + bp->rx_dir); skb_put(skb, len); return skb; @@ -954,17 +1024,19 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, } prod_rx_buf->data = tpa_info->data; + prod_rx_buf->data_ptr = tpa_info->data_ptr; mapping = tpa_info->mapping; - dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + prod_rx_buf->mapping = mapping; prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; prod_bd->rx_bd_haddr = cpu_to_le64(mapping); tpa_info->data = cons_rx_buf->data; + tpa_info->data_ptr = cons_rx_buf->data_ptr; cons_rx_buf->data = NULL; - tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping); + tpa_info->mapping = cons_rx_buf->mapping; tpa_info->len = le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> @@ -1130,7 +1202,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, dev_kfree_skb_any(skb); return NULL; } - tcp_gro_complete(skb); if (nw_off) { /* tunnel */ struct udphdr *uh = NULL; @@ -1180,6 +1251,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); + if (likely(skb)) + tcp_gro_complete(skb); #endif return skb; } @@ -1189,17 +1262,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, u32 *raw_cons, struct rx_tpa_end_cmp *tpa_end, struct rx_tpa_end_cmp_ext *tpa_end1, - bool *agg_event) + u8 *event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u8 agg_id = TPA_END_AGG_ID(tpa_end); - u8 *data, agg_bufs; + u8 *data_ptr, agg_bufs; u16 cp_cons = RING_CMP(*raw_cons); unsigned int len; struct bnxt_tpa_info *tpa_info; dma_addr_t mapping; struct sk_buff *skb; + void *data; if (unlikely(bnapi->in_reset)) { int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); @@ -1211,7 +1285,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info = &rxr->rx_tpa[agg_id]; data = tpa_info->data; - prefetch(data); + data_ptr = tpa_info->data_ptr; + prefetch(data_ptr); len = tpa_info->len; mapping = tpa_info->mapping; @@ -1222,7 +1297,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) return ERR_PTR(-EBUSY); - *agg_event = true; + *event |= BNXT_AGG_EVENT; cp_cons = NEXT_CMP(cp_cons); } @@ -1234,7 +1309,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (len <= bp->rx_copy_thresh) { - skb = bnxt_copy_skb(bnapi, data, len, mapping); + skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); return NULL; @@ -1250,18 +1325,19 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } tpa_info->data = new_data; + tpa_info->data_ptr = new_data + bp->rx_offset; 
tpa_info->mapping = new_mapping; skb = build_skb(data, 0); dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + bp->rx_dir); if (!skb) { kfree(data); bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); return NULL; } - skb_reserve(skb, BNXT_RX_OFFSET); + skb_reserve(skb, bp->rx_offset); skb_put(skb, len); } @@ -1307,7 +1383,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, * -EIO - packet aborted due to hw error indicated in BD */ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, - bool *agg_event) + u8 *event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; @@ -1318,10 +1394,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); struct bnxt_sw_rx_bd *rx_buf; unsigned int len; - u8 *data, agg_bufs, cmp_type; + u8 *data_ptr, agg_bufs, cmp_type; dma_addr_t dma_addr; struct sk_buff *skb; + void *data; int rc = 0; + u32 misc; rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; @@ -1342,13 +1420,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, (struct rx_tpa_start_cmp_ext *)rxcmp1); + *event |= BNXT_RX_EVENT; goto next_rx_no_prod; } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, (struct rx_tpa_end_cmp *)rxcmp, - (struct rx_tpa_end_cmp_ext *)rxcmp1, - agg_event); + (struct rx_tpa_end_cmp_ext *)rxcmp1, event); if (unlikely(IS_ERR(skb))) return -EBUSY; @@ -1356,37 +1434,36 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = -ENOMEM; if (likely(skb)) { skb_record_rx_queue(skb, bnapi->index); - skb_mark_napi_id(skb, &bnapi->napi); - if (bnxt_busy_polling(bnapi)) - netif_receive_skb(skb); - else - napi_gro_receive(&bnapi->napi, skb); + napi_gro_receive(&bnapi->napi, skb); rc = 1; } + *event |= BNXT_RX_EVENT; goto next_rx_no_prod; } cons = rxcmp->rx_cmp_opaque; rx_buf = &rxr->rx_buf_ring[cons]; data = rx_buf->data; + data_ptr = rx_buf->data_ptr; if (unlikely(cons != rxr->rx_next_cons)) { int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); bnxt_sched_reset(bp, rxr); return rc1; } - prefetch(data); + prefetch(data_ptr); - agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >> - RX_CMP_AGG_BUFS_SHIFT; + misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); + agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; if (agg_bufs) { if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) return -EBUSY; cp_cons = NEXT_CMP(cp_cons); - *agg_event = true; + *event |= BNXT_AGG_EVENT; } + *event |= BNXT_RX_EVENT; rx_buf->data = NULL; if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { @@ -1399,17 +1476,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; - dma_addr = dma_unmap_addr(rx_buf, mapping); + dma_addr = rx_buf->mapping; + + if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { + rc = 1; + goto next_rx; + } if (len <= bp->rx_copy_thresh) { - skb = bnxt_copy_skb(bnapi, data, len, dma_addr); + skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { rc = -ENOMEM; goto next_rx; } } else { - skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len); + u32 payload; + + if (rx_buf->data_ptr == data_ptr) + payload = misc & 
RX_CMP_PAYLOAD_OFFSET; + else + payload = 0; + skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, + payload | len); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -1460,11 +1549,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } skb_record_rx_queue(skb, bnapi->index); - skb_mark_napi_id(skb, &bnapi->napi); - if (bnxt_busy_polling(bnapi)) - netif_receive_skb(skb); - else - napi_gro_receive(&bnapi->napi, skb); + napi_gro_receive(&bnapi->napi, skb); rc = 1; next_rx: @@ -1637,8 +1722,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) u32 cons; int tx_pkts = 0; int rx_pkts = 0; - bool rx_event = false; - bool agg_event = false; + u8 event = 0; struct tx_cmp *txcmp; while (1) { @@ -1660,12 +1744,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) if (unlikely(tx_pkts > bp->tx_wake_thresh)) rx_pkts = budget; } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); if (likely(rc >= 0)) rx_pkts += rc; else if (rc == -EBUSY) /* partial completion */ break; - rx_event = true; } else if (unlikely((TX_CMP_TYPE(txcmp) == CMPL_BASE_TYPE_HWRM_DONE) || (TX_CMP_TYPE(txcmp) == @@ -1680,6 +1763,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) break; } + if (event & BNXT_TX_EVENT) { + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + void __iomem *db = txr->tx_doorbell; + u16 prod = txr->tx_prod; + + /* Sync BD data before updating doorbell */ + wmb(); + + writel(DB_KEY_TX | prod, db); + writel(DB_KEY_TX | prod, db); + } + cpr->cp_raw_cons = raw_cons; /* ACK completion ring before freeing tx ring and producing new * buffers in rx/agg rings to prevent overflowing the completion @@ -1688,14 +1783,14 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); if (tx_pkts) - bnxt_tx_int(bp, bnapi, tx_pkts); + bnapi->tx_int(bp, bnapi, tx_pkts); - if (rx_event) { + if (event & BNXT_RX_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); - if (agg_event) { + if (event & BNXT_AGG_EVENT) { writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); writel(DB_KEY_RX | rxr->rx_agg_prod, @@ -1716,7 +1811,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) u32 cp_cons, tmp_raw_cons; u32 raw_cons = cpr->cp_raw_cons; u32 rx_pkts = 0; - bool agg_event = false; + u8 event = 0; while (1) { int rc; @@ -1740,7 +1835,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) rxcmp1->rx_cmp_cfa_code_errors_v2 |= cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); if (likely(rc == -EIO)) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ @@ -1763,13 +1858,13 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); - if (agg_event) { + if (event & BNXT_AGG_EVENT) { writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); } if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_pkts); BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); } return rx_pkts; @@ 
-1782,9 +1877,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int work_done = 0; - if (!bnxt_lock_napi(bnapi)) - return budget; - while (1) { work_done += bnxt_poll_work(bp, bnapi, budget - work_done); @@ -1792,42 +1884,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget) break; if (!bnxt_has_work(bp, cpr)) { - napi_complete(napi); - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + if (napi_complete_done(napi, work_done)) + BNXT_CP_DB_REARM(cpr->cp_doorbell, + cpr->cp_raw_cons); break; } } mmiowb(); - bnxt_unlock_napi(bnapi); return work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int bnxt_busy_poll(struct napi_struct *napi) -{ - struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); - struct bnxt *bp = bnapi->bp; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - int rx_work, budget = 4; - - if (atomic_read(&bp->intr_sem) != 0) - return LL_FLUSH_FAILED; - - if (!bp->link_info.link_up) - return LL_FLUSH_FAILED; - - if (!bnxt_lock_poll(bnapi)) - return LL_FLUSH_BUSY; - - rx_work = bnxt_poll_work(bp, bnapi, budget); - - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); - - bnxt_unlock_poll(bnapi); - return rx_work; -} -#endif - static void bnxt_free_tx_skbs(struct bnxt *bp) { int i, max_idx; @@ -1905,11 +1971,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) if (!data) continue; - dma_unmap_single( - &pdev->dev, - dma_unmap_addr(tpa_info, mapping), - bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, tpa_info->mapping, + bp->rx_buf_use_size, + bp->rx_dir); tpa_info->data = NULL; @@ -1919,19 +1983,20 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) for (j = 0; j < max_idx; j++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; - u8 *data = rx_buf->data; + void *data = rx_buf->data; if (!data) continue; - dma_unmap_single(&pdev->dev, - dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, rx_buf->mapping, + bp->rx_buf_use_size, bp->rx_dir); rx_buf->data = NULL; - kfree(data); + if (BNXT_RX_PAGE_MODE(bp)) + __free_page(data); + else + kfree(data); } for (j = 0; j < max_agg_idx; j++) { @@ -1942,8 +2007,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) if (!page) continue; - dma_unmap_page(&pdev->dev, - dma_unmap_addr(rx_agg_buf, mapping), + dma_unmap_page(&pdev->dev, rx_agg_buf->mapping, BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); rx_agg_buf->page = NULL; @@ -2034,6 +2098,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; + if (rxr->xdp_prog) + bpf_prog_put(rxr->xdp_prog); + kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -2172,6 +2239,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); } ring->queue_id = bp->q_info[j].queue_id; + if (i < bp->tx_nr_rings_xdp) + continue; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) j++; } @@ -2319,6 +2388,15 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) ring = &rxr->rx_ring_struct; bnxt_init_rxbd_pages(ring, type); + if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { + rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); + if (IS_ERR(rxr->xdp_prog)) { + int rc = PTR_ERR(rxr->xdp_prog); + + rxr->xdp_prog = NULL; + return rc; + } + } prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { @@ -2365,6 +2443,7 @@ static int bnxt_init_one_rx_ring(struct bnxt 
*bp, int ring_nr) return -ENOMEM; rxr->rx_tpa[i].data = data; + rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; rxr->rx_tpa[i].mapping = mapping; } } else { @@ -2380,6 +2459,14 @@ static int bnxt_init_rx_rings(struct bnxt *bp) { int i, rc = 0; + if (BNXT_RX_PAGE_MODE(bp)) { + bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; + bp->rx_dma_offset = XDP_PACKET_HEADROOM; + } else { + bp->rx_offset = BNXT_RX_OFFSET; + bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; + } + for (i = 0; i < bp->rx_nr_rings; i++) { rc = bnxt_init_one_rx_ring(bp, i); if (rc) @@ -2503,9 +2590,11 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) return pages; } -static void bnxt_set_tpa_flags(struct bnxt *bp) +void bnxt_set_tpa_flags(struct bnxt *bp) { bp->flags &= ~BNXT_FLAG_TPA; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + return; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; if (bp->dev->features & NETIF_F_GRO) @@ -2535,7 +2624,7 @@ void bnxt_set_ring_params(struct bnxt *bp) agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); bp->flags &= ~BNXT_FLAG_JUMBO; - if (rx_space > PAGE_SIZE) { + if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { u32 jumbo_factor; bp->flags |= BNXT_FLAG_JUMBO; @@ -2587,6 +2676,27 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->cp_ring_mask = bp->cp_bit - 1; } +int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) +{ + if (page_mode) { + if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) + return -EOPNOTSUPP; + bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU; + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + bp->rx_dir = DMA_BIDIRECTIONAL; + bp->rx_skb_func = bnxt_rx_page_skb; + } else { + bp->dev->max_mtu = BNXT_MAX_MTU; + bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; + bp->rx_dir = DMA_FROM_DEVICE; + bp->rx_skb_func = bnxt_rx_skb; + } + return 0; +} + static void bnxt_free_vnic_attributes(struct bnxt *bp) { int i; @@ -2669,6 +2779,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) goto out; } + if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && + !(vnic->flags & BNXT_VNIC_RSS_FLAG)) + continue; + /* Allocate rss table and hash key */ vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &vnic->rss_table_dma_addr, @@ -2892,6 +3006,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); + kfree(bp->tx_ring_map); + bp->tx_ring_map = NULL; kfree(bp->tx_ring); bp->tx_ring = NULL; kfree(bp->rx_ring); @@ -2944,6 +3060,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) if (!bp->tx_ring) return -ENOMEM; + bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), + GFP_KERNEL); + + if (!bp->tx_ring_map) + return -ENOMEM; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) j = 0; else @@ -2952,6 +3074,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) for (i = 0; i < bp->tx_nr_rings; i++, j++) { bp->tx_ring[i].bnapi = bp->bnapi[j]; bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; + bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; + if (i >= bp->tx_nr_rings_xdp) { + bp->tx_ring[i].txq_index = i - + bp->tx_nr_rings_xdp; + bp->bnapi[j]->tx_int = bnxt_tx_int; + } else { + bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; + bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; + } } rc = bnxt_alloc_stats(bp); @@ -2993,6 +3124,47 @@ alloc_mem_err: return rc; } +static void bnxt_disable_int(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < 
bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + +static void bnxt_disable_int_sync(struct bnxt *bp) +{ + int i; + + atomic_inc(&bp->intr_sem); + + bnxt_disable_int(bp); + for (i = 0; i < bp->cp_nr_rings; i++) + synchronize_irq(bp->irq_tbl[i].vector); +} + +static void bnxt_enable_int(struct bnxt *bp) +{ + int i; + + atomic_set(&bp->intr_sem, 0); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, u16 cmpl_ring, u16 target_id) { @@ -3292,6 +3464,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) +#define BNXT_NTP_TUNNEL_FLTR_FLAG \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE + static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { @@ -3312,10 +3487,31 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; req.ip_protocol = keys->basic.ip_proto; - req.src_ipaddr[0] = keys->addrs.v4addrs.src; - req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + if (keys->basic.n_proto == htons(ETH_P_IPV6)) { + int i; + + req.ethertype = htons(ETH_P_IPV6); + req.ip_addr_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; + *(struct in6_addr *)&req.src_ipaddr[0] = + keys->addrs.v6addrs.src; + *(struct in6_addr *)&req.dst_ipaddr[0] = + keys->addrs.v6addrs.dst; + for (i = 0; i < 4; i++) { + req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + } + } else { + req.src_ipaddr[0] = keys->addrs.v4addrs.src; + req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + } + if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { + req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); + req.tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; + } req.src_port = keys->ports.src; req.src_port_mask = cpu_to_be16(0xffff); @@ -3562,6 +3758,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); + } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { + req.rss_rule = + cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); + req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); } else { req.rss_rule = cpu_to_le16(0xffff); } @@ -3665,6 +3867,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, return rc; } +static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) +{ + struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_vnic_qcaps_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10600) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, 
&req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + if (resp->flags & + cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) + bp->flags |= BNXT_FLAG_NEW_RSS_CAP; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) { u16 i; @@ -3768,7 +3991,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL; + req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; req.length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; @@ -3787,7 +4010,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, if (rc || err) { switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_CMPL: + case RING_FREE_REQ_RING_TYPE_L2_CMPL: netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", rc, err); return -1; @@ -3811,6 +4034,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, return rc; } +static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) +{ + int rc; + + if (BNXT_PF(bp)) { + struct hwrm_func_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = cpu_to_le16(idx); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + } else { + struct hwrm_func_vf_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + req.enables = + cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = cpu_to_le16(idx); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + } + return rc; +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { int i, rc = 0; @@ -3827,6 +4074,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) goto err_out; BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; + + if (!i) { + rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); + if (rc) + netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); + } } for (i = 0; i < bp->tx_nr_rings; i++) { @@ -3901,7 +4154,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, if (rc || error_code) { switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_CMPL: + case RING_FREE_REQ_RING_TYPE_L2_CMPL: netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", rc); return rc; @@ -3977,6 +4230,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } + /* The completion rings are about to be freed. After that the + * IRQ doorbell will not work anymore. So we need to disable + * IRQ here. 
+ */ + bnxt_disable_int_sync(bp); + for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -3984,7 +4243,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) if (ring->fw_ring_id != INVALID_HW_RING_ID) { hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_CMPL, + RING_FREE_REQ_RING_TYPE_L2_CMPL, INVALID_HW_RING_ID); ring->fw_ring_id = INVALID_HW_RING_ID; bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; @@ -3992,6 +4251,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } +/* Caller must hold bp->hwrm_cmd_lock */ +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) +{ + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(fid); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + + return rc; +} + +static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + if (BNXT_VF(bp)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); + req.num_tx_rings = cpu_to_le16(*tx_rings); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, u32 buf_tmrs, u16 flags, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) @@ -4249,7 +4552,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) /* overwrite netdev dev_adr with admin VF MAC */ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); } else { - random_ether_addr(bp->dev->dev_addr); + eth_hw_addr_random(bp->dev); rc = bnxt_approve_mac(bp, bp->dev->dev_addr); } return rc; @@ -4463,8 +4766,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc; + if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) + goto skip_rss_ctx; + /* allocate context for vnic */ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); if (rc) { @@ -4484,6 +4791,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) bp->rsscos_nr_ctxs++; } +skip_rss_ctx: /* configure default vnic, ring grp */ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); if (rc) { @@ -4518,13 +4826,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) int i, rc = 0; for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_vnic_info *vnic; u16 vnic_id = i + 1; u16 ring_id = i; if (vnic_id >= bp->nr_vnics) break; - bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; + vnic = &bp->vnic_info[vnic_id]; + vnic->flags |= BNXT_VNIC_RFS_FLAG; + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", @@ -4698,40 +5010,13 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) return bnxt_init_chip(bp, irq_re_init); } -static void bnxt_disable_int(struct bnxt *bp) -{ 
- int i; - - if (!bp->bnapi) - return; - - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); - } -} - -static void bnxt_enable_int(struct bnxt *bp) -{ - int i; - - atomic_set(&bp->intr_sem, 0); - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); - } -} - static int bnxt_set_real_num_queues(struct bnxt *bp) { int rc; struct net_device *dev = bp->dev; - rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); + rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - + bp->tx_nr_rings_xdp); if (rc) return rc; @@ -4779,19 +5064,12 @@ static void bnxt_setup_msix(struct bnxt *bp) tcs = netdev_get_num_tc(dev); if (tcs > 1) { - bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; - if (bp->tx_nr_rings_per_tc == 0) { - netdev_reset_tc(dev); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - } else { - int i, off, count; + int i, off, count; - bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; - for (i = 0; i < tcs; i++) { - count = bp->tx_nr_rings_per_tc; - off = i * count; - netdev_set_tc_queue(dev, i, count, off); - } + for (i = 0; i < tcs; i++) { + count = bp->tx_nr_rings_per_tc; + off = i * count; + netdev_set_tc_queue(dev, i, count, off); } } @@ -4836,6 +5114,26 @@ static int bnxt_setup_int_mode(struct bnxt *bp) return rc; } +#ifdef CONFIG_RFS_ACCEL +static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + if (BNXT_VF(bp)) + return bp->vf.max_rsscos_ctxs; +#endif + return bp->pf.max_rsscos_ctxs; +} + +static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + if (BNXT_VF(bp)) + return bp->vf.max_vnics; +#endif + return bp->pf.max_vnics; +} +#endif + unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) { #if defined(CONFIG_BNXT_SRIOV) @@ -5094,10 +5392,8 @@ static void bnxt_disable_napi(struct bnxt *bp) if (!bp->bnapi) return; - for (i = 0; i < bp->cp_nr_rings; i++) { + for (i = 0; i < bp->cp_nr_rings; i++) napi_disable(&bp->bnapi[i]->napi); - bnxt_disable_poll(bp->bnapi[i]); - } } static void bnxt_enable_napi(struct bnxt *bp) @@ -5106,7 +5402,6 @@ static void bnxt_enable_napi(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { bp->bnapi[i]->in_reset = false; - bnxt_enable_poll(bp->bnapi[i]); napi_enable(&bp->bnapi[i]->napi); } } @@ -5150,7 +5445,7 @@ static void bnxt_report_link(struct bnxt *bp) if (bp->link_info.link_up) { const char *duplex; const char *flow_ctrl; - u16 speed; + u16 speed, fec; netif_carrier_on(bp->dev); if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) @@ -5172,6 +5467,12 @@ static void bnxt_report_link(struct bnxt *bp) netdev_info(bp->dev, "EEE is %s\n", bp->eee.eee_active ? "active" : "not active"); + fec = bp->link_info.fec_cfg; + if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) + netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", + (fec & BNXT_FEC_AUTONEG) ? "on" : "off", + (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : + (fec & BNXT_FEC_ENC_RS) ? 
"RS" : "None"); } else { netif_carrier_off(bp->dev); netdev_err(bp->dev, "NIC Link is Down\n"); @@ -5296,6 +5597,11 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) } } } + + link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; + if (bp->hwrm_spec_code >= 0x10504) + link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); + /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == BNXT_LINK_LINK) @@ -5384,7 +5690,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, { u8 autoneg = bp->link_info.autoneg; u16 fw_link_speed = bp->link_info.req_link_speed; - u32 advertising = bp->link_info.advertising; + u16 advertising = bp->link_info.advertising; if (autoneg & BNXT_AUTONEG_SPEED) { req->auto_mode |= @@ -5489,6 +5795,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) +{ + struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_led_qcaps_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { + int i; + + bp->num_leds = resp->num_leds; + memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * + bp->num_leds); + for (i = 0; i < bp->num_leds; i++) { + struct bnxt_led_info *led = &bp->leds[i]; + __le16 caps = led->led_state_caps; + + if (!led->led_group_id || + !BNXT_LED_ALT_BLINK_CAP(caps)) { + bp->num_leds = 0; + break; + } + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -5527,6 +5872,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp) rc); return rc; } + if (!BNXT_SINGLE_PF(bp)) + return 0; + if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != link_info->req_flow_ctrl) @@ -5678,19 +6026,6 @@ static int bnxt_open(struct net_device *dev) return __bnxt_open_nic(bp, true, true); } -static void bnxt_disable_int_sync(struct bnxt *bp) -{ - int i; - - atomic_inc(&bp->intr_sem); - if (!netif_running(bp->dev)) - return; - - bnxt_disable_int(bp); - for (i = 0; i < bp->cp_nr_rings; i++) - synchronize_irq(bp->irq_tbl[i].vector); -} - int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -5712,13 +6047,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) msleep(20); - /* Flush rings before disabling interrupts */ + /* Flush rings and and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ bnxt_disable_napi(bp); - bnxt_disable_int_sync(bp); del_timer_sync(&bp->timer); bnxt_free_skbs(bp); @@ -5765,16 +6099,14 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static struct rtnl_link_stats64 * +static void bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { u32 i; struct bnxt *bp = netdev_priv(dev); - memset(stats, 0, sizeof(struct rtnl_link_stats64)); - 
if (!bp->bnapi) - return stats; + return; /* TODO check if we need to synchronize with bnxt_close path */ for (i = 0; i < bp->cp_nr_rings; i++) { @@ -5821,8 +6153,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); stats->tx_errors = le64_to_cpu(tx->tx_err); } - - return stats; } static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) @@ -5975,20 +6305,36 @@ skip_uc: return rc; } +/* If the chip and firmware supports RFS */ +static bool bnxt_rfs_supported(struct bnxt *bp) +{ + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) + return true; + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + return true; + return false; +} + +/* If runtime conditions support RFS */ static bool bnxt_rfs_capable(struct bnxt *bp) { #ifdef CONFIG_RFS_ACCEL - struct bnxt_pf_info *pf = &bp->pf; - int vnics; + int vnics, max_vnics, max_rss_ctxs; - if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP)) + if (!(bp->flags & BNXT_FLAG_MSIX_CAP)) return false; vnics = 1 + bp->rx_nr_rings; - if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) { + max_vnics = bnxt_get_max_func_vnics(bp); + max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); + + /* RSS contexts not a limiting factor */ + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + max_rss_ctxs = max_vnics; + if (vnics > max_vnics || vnics > max_rss_ctxs) { netdev_warn(bp->dev, "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", - min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1)); + min(max_rss_ctxs - 1, max_vnics - 1)); return false; } @@ -6044,6 +6390,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (features & NETIF_F_LRO) flags |= BNXT_FLAG_LRO; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + flags &= ~BNXT_FLAG_TPA; + if (features & NETIF_F_HW_VLAN_CTAG_RX) flags |= BNXT_FLAG_STRIP_VLAN; @@ -6296,6 +6645,62 @@ static void bnxt_sp_task(struct work_struct *work) clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } +/* Under rtnl_lock */ +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp) +{ + int max_rx, max_tx, tx_sets = 1; + int tx_rings_needed; + bool sh = true; + int rc; + + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + sh = false; + + if (tcs) + tx_sets = tcs; + + rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); + if (rc) + return rc; + + if (max_rx < rx) + return -ENOMEM; + + tx_rings_needed = tx * tx_sets + tx_xdp; + if (max_tx < tx_rings_needed) + return -ENOMEM; + + if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) || + tx_rings_needed < (tx * tx_sets + tx_xdp)) + return -ENOMEM; + return 0; +} + +static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) +{ + if (bp->bar2) { + pci_iounmap(pdev, bp->bar2); + bp->bar2 = NULL; + } + + if (bp->bar1) { + pci_iounmap(pdev, bp->bar1); + bp->bar1 = NULL; + } + + if (bp->bar0) { + pci_iounmap(pdev, bp->bar0); + bp->bar0 = NULL; + } +} + +static void bnxt_cleanup_pci(struct bnxt *bp) +{ + bnxt_unmap_bars(bp, bp->pdev); + pci_release_regions(bp->pdev); + pci_disable_device(bp->pdev); +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -6383,25 +6788,10 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->current_interval = BNXT_TIMER_INTERVAL; clear_bit(BNXT_STATE_OPEN, &bp->state); - return 0; init_err_release: - if (bp->bar2) { - pci_iounmap(pdev, bp->bar2); - bp->bar2 = NULL; - } - - if (bp->bar1) { - pci_iounmap(pdev, bp->bar1); - bp->bar1 = NULL; - } - - if 
(bp->bar0) { - pci_iounmap(pdev, bp->bar0); - bp->bar0 = NULL; - } - + bnxt_unmap_bars(bp, pdev); pci_release_regions(pdev); init_err_disable: @@ -6458,9 +6848,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) { struct bnxt *bp = netdev_priv(dev); bool sh = false; + int rc; if (tc > bp->max_tc) { - netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", + netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", tc, bp->max_tc); return -EINVAL; } @@ -6471,13 +6862,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - if (tc) { - int max_rx_rings, max_tx_rings, rc; - - rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); - if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) - return -ENOMEM; - } + rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + tc, bp->tx_nr_rings_xdp); + if (rc) + return rc; /* Needs to close the device and do hw resource re-allocations */ if (netif_running(bp->dev)) @@ -6521,6 +6909,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, keys1->ports.ports == keys2->ports.ports && keys1->basic.ip_proto == keys2->basic.ip_proto && keys1->basic.n_proto == keys2->basic.n_proto && + keys1->control.flags == keys2->control.flags && ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) return true; @@ -6538,9 +6927,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, int rc = 0, idx, bit_id, l2_idx = 0; struct hlist_head *head; - if (skb->encapsulation) - return -EPROTONOSUPPORT; - if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; int off = 0, j; @@ -6567,12 +6953,23 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, goto err_free; } - if ((fkeys->basic.n_proto != htons(ETH_P_IP)) || + if ((fkeys->basic.n_proto != htons(ETH_P_IP) && + fkeys->basic.n_proto != htons(ETH_P_IPV6)) || ((fkeys->basic.ip_proto != IPPROTO_TCP) && (fkeys->basic.ip_proto != IPPROTO_UDP))) { rc = -EPROTONOSUPPORT; goto err_free; } + if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && + bp->hwrm_spec_code < 0x10601) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && + bp->hwrm_spec_code < 0x10601) { + rc = -EPROTONOSUPPORT; + goto err_free; + } memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); @@ -6779,9 +7176,7 @@ static const struct net_device_ops bnxt_netdev_ops = { #endif .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = bnxt_busy_poll, -#endif + .ndo_xdp = bnxt_xdp, }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -6801,15 +7196,12 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_dcb_free(bp); - pci_iounmap(pdev, bp->bar2); - pci_iounmap(pdev, bp->bar1); - pci_iounmap(pdev, bp->bar0); kfree(bp->edev); bp->edev = NULL; + if (bp->xdp_prog) + bpf_prog_put(bp->xdp_prog); + bnxt_cleanup_pci(bp); free_netdev(dev); - - pci_release_regions(pdev); - pci_disable_device(pdev); } static int bnxt_probe_phy(struct bnxt *bp) @@ -6920,8 +7312,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, int rc; rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); - if (rc) - return rc; + if (rc && 
(bp->flags & BNXT_FLAG_AGG_RINGS)) { + /* Not enough rings, try disabling agg rings. */ + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); + if (rc) + return rc; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + bnxt_set_ring_params(bp); + } if (bp->flags & BNXT_FLAG_ROCE_CAP) { int max_cp, max_stat, max_irq; @@ -6960,6 +7361,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp) return rc; bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + + rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc); + if (rc) + netdev_warn(bp->dev, "Unable to reserve tx rings\n"); + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; @@ -7001,7 +7407,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct bnxt *bp; int rc, max_irqs; - if (pdev->device == 0x16cd && pci_is_bridge(pdev)) + if (pci_is_bridge(pdev)) return -ENODEV; if (version_printed++ == 0) @@ -7027,17 +7433,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; - pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); if (rc) - goto init_err; + goto init_err_pci_clean; mutex_init(&bp->hwrm_cmd_lock); rc = bnxt_hwrm_ver_get(bp); if (rc) - goto init_err; + goto init_err_pci_clean; bnxt_hwrm_fw_set_time(bp); @@ -7068,7 +7473,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* MTU range: 60 - 9500 */ dev->min_mtu = ETH_ZLEN; - dev->max_mtu = 9500; + dev->max_mtu = BNXT_MAX_MTU; bnxt_dcb_init(bp); @@ -7081,11 +7486,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = bnxt_hwrm_func_drv_rgtr(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); if (rc) - goto init_err; + goto init_err_pci_clean; bp->ulp_probe = bnxt_ulp_probe; @@ -7095,7 +7500,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", rc); rc = -1; - goto init_err; + goto init_err_pci_clean; } rc = bnxt_hwrm_queue_qportcfg(bp); @@ -7103,15 +7508,22 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", rc); rc = -1; - goto init_err; + goto init_err_pci_clean; } bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_port_led_qcaps(bp); + bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); bnxt_set_max_func_irqs(bp, max_irqs); - bnxt_set_dflt_rings(bp); + rc = bnxt_set_dflt_rings(bp); + if (rc) { + netdev_err(bp->dev, "Not enough rings available.\n"); + rc = -ENOMEM; + goto init_err_pci_clean; + } /* Default RSS hash cfg. 
*/ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | @@ -7126,7 +7538,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; } - if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bnxt_hwrm_vnic_qcaps(bp); + if (bnxt_rfs_supported(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { bp->flags |= BNXT_FLAG_RFS; @@ -7139,15 +7552,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = bnxt_probe_phy(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = bnxt_hwrm_func_reset(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = bnxt_init_int_mode(bp); if (rc) - goto init_err; + goto init_err_pci_clean; rc = register_netdev(dev); if (rc) @@ -7164,10 +7577,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) init_err_clr_int: bnxt_clear_int_mode(bp); -init_err: - pci_iounmap(pdev, bp->bar0); - pci_release_regions(pdev); - pci_disable_device(pdev); +init_err_pci_clean: + bnxt_cleanup_pci(bp); init_err_free: free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 16defe9ececc..faf26a2f726b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1,6 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,10 +12,10 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.6.0" +#define DRV_MODULE_VERSION "1.7.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 6 +#define DRV_VER_MIN 7 #define DRV_VER_UPD 0 struct tx_bd { @@ -416,6 +417,11 @@ struct rx_tpa_end_cmp_ext { #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) +#define BNXT_MAX_MTU 9500 +#define BNXT_MAX_PAGE_MODE_MTU \ + ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ + XDP_PACKET_HEADROOM) + #define BNXT_MIN_PKT_SIZE 52 #define BNXT_NUM_TESTS(bp) 0 @@ -507,17 +513,25 @@ struct rx_tpa_end_cmp_ext { #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) +#define BNXT_RX_EVENT 1 +#define BNXT_AGG_EVENT 2 +#define BNXT_TX_EVENT 4 + struct bnxt_sw_tx_bd { struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(mapping); u8 is_gso; u8 is_push; - unsigned short nr_frags; + union { + unsigned short nr_frags; + u16 rx_prod; + }; }; struct bnxt_sw_rx_bd { - u8 *data; - DEFINE_DMA_UNMAP_ADDR(mapping); + void *data; + u8 *data_ptr; + dma_addr_t mapping; }; struct bnxt_sw_rx_agg_bd { @@ -558,6 +572,7 @@ struct bnxt_tx_ring_info { struct bnxt_napi *bnapi; u16 tx_prod; u16 tx_cons; + u16 txq_index; void __iomem *tx_doorbell; struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; @@ -576,7 +591,8 @@ struct bnxt_tx_ring_info { }; struct bnxt_tpa_info { - u8 *data; + void *data; + u8 *data_ptr; dma_addr_t mapping; u16 len; unsigned short gso_type; @@ -608,6 +624,8 @@ struct bnxt_rx_ring_info { void __iomem *rx_doorbell; void __iomem *rx_agg_doorbell; + struct bpf_prog *xdp_prog; + struct rx_bd *rx_desc_ring[MAX_RX_PAGES]; struct bnxt_sw_rx_bd *rx_buf_ring; @@ -654,20 +672,13 @@ struct bnxt_napi { struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t poll_state; -#endif - bool in_reset; -}; + void (*tx_int)(struct bnxt *, struct bnxt_napi *, + int); + 
u32 flags; +#define BNXT_NAPI_FLAG_XDP 0x1 -#ifdef CONFIG_NET_RX_BUSY_POLL -enum bnxt_poll_state_t { - BNXT_STATE_IDLE = 0, - BNXT_STATE_NAPI, - BNXT_STATE_POLL, - BNXT_STATE_DISABLE, + bool in_reset; }; -#endif struct bnxt_irq { irq_handler_t handler; @@ -720,6 +731,7 @@ struct bnxt_vnic_info { #define BNXT_VNIC_RFS_FLAG 2 #define BNXT_VNIC_MCAST_FLAG 4 #define BNXT_VNIC_UCAST_FLAG 8 +#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 }; #if defined(CONFIG_BNXT_SRIOV) @@ -840,7 +852,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB #define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB u16 support_speeds; - u16 auto_link_speeds; + u16 auto_link_speeds; /* fw adv setting */ #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB #define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB @@ -855,6 +867,10 @@ struct bnxt_link_info { u16 force_link_speed; u32 preemphasis; u8 module_status; + u16 fec_cfg; +#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED +#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED +#define BNXT_FEC_ENC_RS PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED /* copy of requested setting from ethtool cmd */ u8 autoneg; @@ -863,7 +879,7 @@ struct bnxt_link_info { u8 req_duplex; u8 req_flow_ctrl; u16 req_link_speed; - u32 advertising; + u16 advertising; /* user adv setting */ bool force_link_chng; /* a copy of phy_qcfg output used to report link @@ -879,6 +895,20 @@ struct bnxt_queue_info { u8 queue_profile; }; +#define BNXT_MAX_LED 4 + +struct bnxt_led_info { + u8 led_id; + u8 led_type; + u8 led_group_id; + u8 unused; + __le16 led_state_caps; +#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \ + cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED)) + + __le16 led_color_caps; +}; + #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 @@ -956,10 +986,13 @@ struct bnxt { #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_UDP_RSS_CAP 0x800 #define BNXT_FLAG_EEE_CAP 0x1000 + #define BNXT_FLAG_NEW_RSS_CAP 0x2000 #define BNXT_FLAG_ROCEV1_CAP 0x8000 #define BNXT_FLAG_ROCEV2_CAP 0x10000 #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \ BNXT_FLAG_ROCEV2_CAP) + #define BNXT_FLAG_NO_AGG_RINGS 0x20000 + #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -971,6 +1004,7 @@ struct bnxt { #define BNXT_NPAR(bp) ((bp)->port_partition_type) #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp)) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) +#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) struct bnxt_en_dev *edev; struct bnxt_en_dev * (*ulp_probe)(struct net_device *); @@ -979,12 +1013,21 @@ struct bnxt { struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; + u16 *tx_ring_map; struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int, struct sk_buff *); + struct sk_buff * (*rx_skb_func)(struct bnxt *, + struct bnxt_rx_ring_info *, + u16, void *, u8 *, dma_addr_t, + unsigned int); + u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ + u16 rx_offset; + u16 rx_dma_offset; + enum dma_data_direction rx_dir; u32 rx_ring_size; u32 rx_agg_ring_size; u32 rx_copy_thresh; @@ -1000,6 +1043,7 @@ struct bnxt { int tx_nr_pages; int tx_nr_rings; int tx_nr_rings_per_tc; + int 
tx_nr_rings_xdp; int tx_wake_thresh; int tx_push_thresh; @@ -1132,6 +1176,11 @@ struct bnxt { struct ethtool_eee eee; u32 lpi_tmr_lo; u32 lpi_tmr_hi; + + u8 num_leds; + struct bnxt_led_info leds[BNXT_MAX_LED]; + + struct bpf_prog *xdp_prog; }; #define BNXT_RX_STATS_OFFSET(counter) \ @@ -1141,93 +1190,6 @@ struct bnxt { ((offsetof(struct tx_port_stats, counter) + \ sizeof(struct rx_port_stats) + 512) / 8) -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -/* called from the NAPI poll routine to get ownership of a bnapi */ -static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) -{ - int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_NAPI); - - return rc == BNXT_STATE_IDLE; -} - -static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -/* called from the busy poll routine to get ownership of a bnapi */ -static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) -{ - int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_POLL); - - return rc == BNXT_STATE_IDLE; -} - -static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) -{ - return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL; -} - -static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) -{ - int old; - - while (1) { - old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_DISABLE); - if (old == BNXT_STATE_IDLE) - break; - usleep_range(500, 5000); - } -} - -#else - -static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) -{ - return true; -} - -static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) -{ - return false; -} - -static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) -{ - return false; -} - -static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) -{ -} - -#endif - #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e @@ -1238,7 +1200,23 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) #define SFF_MODULE_ID_QSFP28 0x11 #define BNXT_MAX_PHY_I2C_RESP_SIZE 64 +static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + /* Tell compiler to fetch tx indices from memory. 
*/ + barrier(); + + return bp->tx_ring_size - + ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); +} + +extern const u16 bnxt_lhint_arr[]; + +int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp); +void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data); +void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); +int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message(struct bnxt *, void *, u32, int); @@ -1246,6 +1224,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, int bmap_size); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); int bnxt_hwrm_set_coal(struct bnxt *); unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); @@ -1259,6 +1238,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool); +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 784aa77610bc..6903a873f072 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -357,7 +357,7 @@ static void bnxt_get_channels(struct net_device *dev, int max_rx_rings, max_tx_rings, tcs; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); - channel->max_combined = max_t(int, max_rx_rings, max_tx_rings); + channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { max_rx_rings = 0; @@ -387,9 +387,10 @@ static int bnxt_set_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); - int max_rx_rings, max_tx_rings, tcs; - u32 rc = 0; + int req_tx_rings, req_rx_rings, tcs; bool sh = false; + int tx_xdp = 0; + int rc = 0; if (channel->other_count) return -EINVAL; @@ -409,19 +410,22 @@ static int bnxt_set_channels(struct net_device *dev, if (channel->combined_count) sh = true; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); - tcs = netdev_get_num_tc(dev); - if (tcs > 1) - max_tx_rings /= tcs; - - if (sh && - channel->combined_count > max_t(int, max_rx_rings, max_tx_rings)) - return -ENOMEM; - if (!sh && (channel->rx_count > max_rx_rings || - channel->tx_count > max_tx_rings)) - return -ENOMEM; + req_tx_rings = sh ? channel->combined_count : channel->tx_count; + req_rx_rings = sh ? 
channel->combined_count : channel->rx_count; + if (bp->tx_nr_rings_xdp) { + if (!sh) { + netdev_err(dev, "Only combined mode supported when XDP is enabled.\n"); + return -EINVAL; + } + tx_xdp = req_rx_rings; + } + rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp); + if (rc) { + netdev_warn(dev, "Unable to allocate the requested rings\n"); + return rc; + } if (netif_running(dev)) { if (BNXT_PF(bp)) { @@ -439,19 +443,17 @@ static int bnxt_set_channels(struct net_device *dev, if (sh) { bp->flags |= BNXT_FLAG_SHARED_RINGS; - bp->rx_nr_rings = min_t(int, channel->combined_count, - max_rx_rings); - bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count, - max_tx_rings); + bp->rx_nr_rings = channel->combined_count; + bp->tx_nr_rings_per_tc = channel->combined_count; } else { bp->flags &= ~BNXT_FLAG_SHARED_RINGS; bp->rx_nr_rings = channel->rx_count; bp->tx_nr_rings_per_tc = channel->tx_count; } - - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings_xdp = tx_xdp; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp; if (tcs > 1) - bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp; bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; @@ -524,24 +526,49 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) fltr_found: fkeys = &fltr->fkeys; - if (fkeys->basic.ip_proto == IPPROTO_TCP) - fs->flow_type = TCP_V4_FLOW; - else if (fkeys->basic.ip_proto == IPPROTO_UDP) - fs->flow_type = UDP_V4_FLOW; - else - goto fltr_err; + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V4_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V4_FLOW; + else + goto fltr_err; - fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; - fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); - fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; - fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + + fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + + fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + } else { + int i; - fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; - fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V6_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V6_FLOW; + else + goto fltr_err; + + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = + fkeys->addrs.v6addrs.src; + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = + fkeys->addrs.v6addrs.dst; + for (i = 0; i < 4; i++) { + fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0); + fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0); + } + fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); - fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; - fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); + } fs->ring_cookie = fltr->rxq; rc = 0; @@ -893,7 +920,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, struct 
ethtool_link_ksettings *lk_ksettings) { - u16 fw_speeds = link_info->auto_link_speeds; + u16 fw_speeds = link_info->advertising; u8 fw_pause = 0; if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) @@ -1090,8 +1117,9 @@ static int bnxt_set_link_ksettings(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; const struct ethtool_link_settings *base = &lk_ksettings->base; - u32 speed, fw_advertising = 0; bool set_pause = false; + u16 fw_advertising = 0; + u32 speed; int rc = 0; if (!BNXT_SINGLE_PF(bp)) @@ -1550,17 +1578,37 @@ static int bnxt_flash_package_from_file(struct net_device *dev, bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); install.install_type = cpu_to_le32(install_type); - rc = hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (rc) - return -EOPNOTSUPP; + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (rc) { + rc = -EOPNOTSUPP; + goto flash_pkg_exit; + } + + if (resp->error_code) { + u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; + + if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { + install.flags |= cpu_to_le16( + NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); + rc = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (rc) { + rc = -EOPNOTSUPP; + goto flash_pkg_exit; + } + } + } if (resp->result) { netdev_err(dev, "PKG install error = %d, problem_item = %d\n", (s8)resp->result, (int)resp->problem_item); - return -ENOPKG; + rc = -ENOPKG; } - return 0; +flash_pkg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; } static int bnxt_flash_device(struct net_device *dev, @@ -2039,6 +2087,47 @@ static int bnxt_nway_reset(struct net_device *dev) return rc; } +static int bnxt_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct hwrm_port_led_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_led_cfg *led_cfg; + u8 led_state; + __le16 duration; + int i, rc; + + if (!bp->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + if (state == ETHTOOL_ID_ACTIVE) { + led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; + duration = cpu_to_le16(500); + } else if (state == ETHTOOL_ID_INACTIVE) { + led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; + duration = cpu_to_le16(0); + } else { + return -EINVAL; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + for (i = 0; i < bp->num_leds; i++, led_cfg++) { + req.enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = duration; + led_cfg->led_blink_off = duration; + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + const struct ethtool_ops bnxt_ethtool_ops = { .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, @@ -2070,5 +2159,6 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_eee = bnxt_set_eee, .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, - .nway_reset = bnxt_nway_reset + .nway_reset = bnxt_nway_reset, + .set_phys_id = bnxt_set_phys_id, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 3abc03b60dbc..ed1e555292e9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -10,6 +10,29 @@ #ifndef BNXT_ETHTOOL_H #define BNXT_ETHTOOL_H +struct bnxt_led_cfg { + u8 led_id; + u8 led_state; + u8 led_color; + u8 unused; + __le16 led_blink_on; + __le16 led_blink_off; + u8 led_group_id; + u8 rsvd; +}; + +#define BNXT_LED_DFLT_ENA \ + (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ + PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \ + PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID) + +#define BNXT_LED_DFLT_ENA_SHIFT 6 + +#define BNXT_LED_DFLT_ENABLES(x) \ + cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) + extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 2ddfa51519a1..6e275c23d68b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,12 +11,12 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.6.0 */ +/* HSI and HWRM Specification 1.7.0 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 6 +#define HWRM_VERSION_MINOR 7 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_STR "1.6.0" +#define HWRM_VERSION_STR "1.7.0" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
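For reference, the BNXT_LED_DFLT_ENABLES(x) macro added in the bnxt_ethtool.h hunk above builds the per-LED enables mask used by bnxt_set_phys_id() simply by shifting the LED0 enable bits six positions per LED index, which matches the spacing of the PORT_LED_CFG_REQ_ENABLES_LED* flags defined later in bnxt_hsi.h. The following is a minimal user-space sketch of that expansion, not driver code; the short LED0_* names are local stand-ins for the PORT_LED_CFG_REQ_ENABLES_LED0_* values shown in these hunks, and the cpu_to_le32() wrapper is omitted (it is an identity on little-endian hosts).

/* Illustrative sketch only -- re-derives BNXT_LED_DFLT_ENABLES(x) from the
 * enable-bit values visible in the hunks above and checks the LED1 case.
 */
#include <stdint.h>
#include <stdio.h>

#define LED0_ID		0x1u	/* PORT_LED_CFG_REQ_ENABLES_LED0_ID */
#define LED0_STATE	0x2u	/* ..._LED0_STATE */
#define LED0_BLINK_ON	0x8u	/* ..._LED0_BLINK_ON */
#define LED0_BLINK_OFF	0x10u	/* ..._LED0_BLINK_OFF */
#define LED0_GROUP_ID	0x20u	/* ..._LED0_GROUP_ID */

#define LED_DFLT_ENA \
	(LED0_ID | LED0_STATE | LED0_BLINK_ON | LED0_BLINK_OFF | LED0_GROUP_ID)
#define LED_DFLT_ENA_SHIFT	6
#define LED_DFLT_ENABLES(x)	(LED_DFLT_ENA << (LED_DFLT_ENA_SHIFT * (x)))

int main(void)
{
	/* LED1 flags from the bnxt_hsi.h hunk: ID 0x40, STATE 0x80,
	 * BLINK_ON 0x200, BLINK_OFF 0x400, GROUP_ID 0x800 -> 0xec0.
	 */
	uint32_t led1 = 0x40u | 0x80u | 0x200u | 0x400u | 0x800u;

	printf("LED_DFLT_ENABLES(1) = %#x (expect %#x)\n",
	       LED_DFLT_ENABLES(1), led1);
	return 0;
}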
@@ -549,6 +549,8 @@ struct hwrm_ver_get_output { __le32 dev_caps_cfg; #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL u8 roce_fw_maj; u8 roce_fw_min; u8 roce_fw_bld; @@ -832,20 +834,32 @@ struct hwrm_func_qcfg_output { __le32 min_bw; #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 - #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID __le32 max_bw; #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0 - #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID @@ -921,20 +935,32 @@ struct hwrm_func_cfg_input { __le32 min_bw; #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0 - #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define 
FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID __le32 max_bw; #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0 - #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL + #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID @@ -1529,6 +1555,20 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -1919,6 +1959,219 @@ struct hwrm_port_phy_i2c_read_output { u8 valid; }; +/* hwrm_port_led_cfg */ +/* Input (64 bytes) */ +struct hwrm_port_led_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define 
PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + u8 led2_color; + #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* Output (16 bytes) */ +struct hwrm_port_led_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_led_qcaps */ +/* Input (24 bytes) */ +struct hwrm_port_led_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (48 bytes) */ +struct hwrm_port_led_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused_0[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + 
#define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + u8 led0_group_id; + u8 unused_1; + __le16 led0_state_caps; + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + u8 led1_group_id; + u8 unused_2; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + u8 led2_group_id; + u8 unused_3; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + u8 led3_group_id; + u8 unused_4; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_5; + u8 unused_6; + u8 unused_7; + u8 valid; +}; + /* hwrm_queue_qportcfg */ /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { @@ -2216,20 +2469,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id0_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id0_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2244,20 +2509,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id1_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id1_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2272,20 +2549,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id2_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID __le32 
queue_id2_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2300,20 +2589,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id3_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id3_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 
0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2328,20 +2629,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id4_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id4_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2356,20 +2669,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id5_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id5_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2384,20 +2709,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id6_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS 
(0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id6_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2412,20 +2749,32 @@ struct hwrm_queue_cos2bw_qcfg_output { __le32 queue_id7_min_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id7_max_bw; #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2467,20 +2816,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id0_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id0_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 
0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2495,20 +2856,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id1_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id1_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO 
(0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2523,20 +2896,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id2_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id2_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2551,20 +2936,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id3_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id3_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2579,20 +2976,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id4_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id4_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2607,20 +3016,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id5_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id5_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2635,20 +3056,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id6_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id6_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2663,20 +3096,32 @@ struct hwrm_queue_cos2bw_cfg_input { __le32 queue_id7_min_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID __le32 queue_id7_max_bw; #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID @@ -2797,6 +3242,41 @@ struct hwrm_vnic_cfg_output { u8 valid; }; +/* hwrm_vnic_qcaps */ +/* Input (24 bytes) */ +struct hwrm_vnic_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; 
+ __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + __le32 unused_0; +}; + +/* Output (24 bytes) */ +struct hwrm_vnic_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0; + u8 unused_1; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + __le32 unused_2; + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + /* hwrm_vnic_tpa_cfg */ /* Input (40 bytes) */ struct hwrm_vnic_tpa_cfg_input { @@ -2992,9 +3472,10 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL u8 ring_type; - #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL + #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL + #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL u8 unused_0; __le16 unused_1; __le64 page_tbl_addr; @@ -3028,10 +3509,16 @@ struct hwrm_ring_alloc_input { __le32 max_bw; #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 - #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID @@ -3066,9 +3553,10 @@ struct hwrm_ring_free_input { __le16 target_id; __le64 resp_addr; u8 ring_type; - #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL + #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_FREE_REQ_RING_TYPE_TX 0x1UL #define RING_FREE_REQ_RING_TYPE_RX 0x2UL + #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -3166,9 +3654,10 @@ struct hwrm_ring_reset_input { __le16 target_id; __le64 resp_addr; u8 ring_type; - #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_RESET_REQ_RING_TYPE_TX 0x1UL #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -3597,6 +4086,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __le32 flags; #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -3697,7 +4187,7 @@ struct 
hwrm_cfa_ntuple_filter_free_output { }; /* hwrm_cfa_ntuple_filter_cfg */ -/* Input (40 bytes) */ +/* Input (48 bytes) */ struct hwrm_cfa_ntuple_filter_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -3707,10 +4197,14 @@ struct hwrm_cfa_ntuple_filter_cfg_input { __le32 enables; #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL __le32 unused_0; __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + __le16 unused_1[3]; }; /* Output (16 bytes) */ @@ -4058,9 +4552,7 @@ struct hwrm_fw_set_structured_data_input { __le64 src_data_addr; __le16 data_len; u8 hdr_cnt; - u8 unused_0; - __le16 port_id; - __le16 unused_1; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4077,7 +4569,7 @@ struct hwrm_fw_set_structured_data_output { }; /* hwrm_fw_get_structured_data */ -/* Input (40 bytes) */ +/* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { __le16 req_type; __le16 cmpl_ring; @@ -4095,10 +4587,9 @@ struct hwrm_fw_get_structured_data_input { #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL u8 count; u8 unused_0; - __le16 port_id; - __le16 unused_1[3]; }; /* Output (16 bytes) */ @@ -4582,7 +5073,11 @@ struct hwrm_nvm_install_update_input { __le32 install_type; #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL - __le32 unused_0; + __le16 flags; + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + __le16 unused_0; }; /* Output (24 bytes) */ @@ -4608,6 +5103,15 @@ struct hwrm_nvm_install_update_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_nvm_install_update_cmd_err { + u8 code; + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + u8 unused_0[7]; +}; + /* Hardware Resource Manager Specification */ /* Input (16 bytes) */ struct input { @@ -4735,11 +5239,26 @@ struct cmd_nums { #define HWRM_WOL_FILTER_FREE (0xf1UL) #define HWRM_WOL_FILTER_QCFG (0xf2UL) #define HWRM_WOL_REASON_QCFG (0xf3UL) + #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL) + #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL) + #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL) + #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL) + #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL) + #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL) + #define HWRM_CFA_VF_PAIR_FREE (0x101UL) + #define HWRM_CFA_VF_PAIR_INFO (0x102UL) + #define HWRM_CFA_FLOW_ALLOC (0x103UL) + #define HWRM_CFA_FLOW_FREE (0x104UL) + #define HWRM_CFA_FLOW_FLUSH (0x105UL) + #define HWRM_CFA_FLOW_STATS (0x106UL) + #define HWRM_CFA_FLOW_INFO (0x107UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) #define HWRM_DBG_DUMP (0xff14UL) + #define HWRM_NVM_VALIDATE_OPTION (0xffefUL) + #define HWRM_NVM_FLUSH (0xfff0UL) #define HWRM_NVM_GET_VARIABLE 
(0xfff1UL) #define HWRM_NVM_SET_VARIABLE (0xfff2UL) #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL) @@ -4939,12 +5458,13 @@ struct ctx_hw_stats { struct hwrm_struct_hdr { __le16 struct_id; #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL - #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL - #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL - #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL - #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL - #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL - #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL __le16 len; u8 version; u8 count; @@ -4954,14 +5474,14 @@ struct hwrm_struct_hdr { __le16 unused_0[3]; }; -/* DCBX Application configuration structure (8 bytes) */ -struct hwrm_struct_data_dcbx_app_cfg { - __le16 protocol_id; +/* DCBX Application configuration structure (1057) (8 bytes) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; u8 protocol_selector; - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL u8 priority; u8 valid; u8 unused_0[3]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c69602508666..0b8cd7443843 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -15,6 +15,7 @@ #include <linux/etherdevice.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" @@ -416,6 +417,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) u16 vf_ring_grps; struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + int total_vf_tx_rings = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); @@ -429,6 +431,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs; vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs; + vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs; + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | FUNC_CFG_REQ_ENABLES_MRU | @@ -451,7 +455,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) req.num_rx_rings = cpu_to_le16(vf_rx_rings); req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); req.num_l2_ctxs = cpu_to_le16(4); - vf_vnics = 1; req.num_vnics = cpu_to_le16(vf_vnics); /* FIXME spec currently uses 1 bit for stats ctx */ @@ -459,6 +462,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < num_vfs; i++) { + int vf_tx_rsvd = vf_tx_rings; + req.fid = cpu_to_le16(pf->first_vf_id + i); rc = 
_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -466,10 +471,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) break; pf->active_vfs = i + 1; pf->vf[i].fw_fid = le16_to_cpu(req.fid); + rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, + &vf_tx_rsvd); + if (rc) + break; + total_vf_tx_rings += vf_tx_rsvd; } mutex_unlock(&bp->hwrm_cmd_lock); if (!rc) { - pf->max_tx_rings -= vf_tx_rings * num_vfs; + pf->max_tx_rings -= total_vf_tx_rings; pf->max_rx_rings -= vf_rx_rings * num_vfs; pf->max_hw_ring_grps -= vf_ring_grps * num_vfs; pf->max_cp_rings -= vf_cp_rings * num_vfs; @@ -506,6 +516,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) min_rx_rings) rx_ok = 1; } + if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) + rx_ok = 0; if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) tx_ok = 1; @@ -544,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) if (rc) goto err_out2; + bnxt_ulp_sriov_cfg(bp, *num_vfs); + rc = pci_enable_sriov(bp->pdev, *num_vfs); if (rc) goto err_out2; @@ -585,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp) rtnl_lock(); bnxt_restore_pf_fw_resources(bp); rtnl_unlock(); + + bnxt_ulp_sriov_cfg(bp, 0); } int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c new file mode 100644 index 000000000000..899c30fb5188 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -0,0 +1,240 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#include <linux/filter.h> +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_xdp.h" + +static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + dma_addr_t mapping, u32 len, u16 rx_prod) +{ + struct bnxt_sw_tx_bd *tx_buf; + struct tx_bd_ext *txbd1; + struct tx_bd *txbd; + u32 flags; + u16 prod; + + prod = txr->tx_prod; + tx_buf = &txr->tx_buf_ring[prod]; + tx_buf->rx_prod = rx_prod; + + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | + (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW | + TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9]; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd->tx_bd_opaque = prod; + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + prod = NEXT_TX(prod); + txbd1 = (struct tx_bd_ext *) + &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + txbd1->tx_bd_hsize_lflags = cpu_to_le32(0); + txbd1->tx_bd_mss = cpu_to_le32(0); + txbd1->tx_bd_cfa_action = cpu_to_le32(0); + txbd1->tx_bd_cfa_meta = cpu_to_le32(0); + + prod = NEXT_TX(prod); + txr->tx_prod = prod; +} + +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +{ + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct bnxt_sw_tx_bd *tx_buf; + u16 tx_cons = txr->tx_cons; + u16 last_tx_cons = tx_cons; + u16 rx_prod; + int i; + + for (i = 0; i < nr_pkts; i++) { + last_tx_cons = tx_cons; + tx_cons = NEXT_TX(tx_cons); + tx_cons = NEXT_TX(tx_cons); + } + txr->tx_cons = tx_cons; + if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) { + rx_prod = rxr->rx_prod; + } else { + tx_buf = &txr->tx_buf_ring[last_tx_cons]; + rx_prod = tx_buf->rx_prod; + } + writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell); +} + +/* returns the following: + * true - packet consumed by XDP and new buffer is allocated. + * false - packet should be passed to the stack. + */ +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct page *page, u8 **data_ptr, unsigned int *len, u8 *event) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_rx_bd *rx_buf; + struct pci_dev *pdev; + struct xdp_buff xdp; + dma_addr_t mapping; + void *orig_data; + u32 tx_avail; + u32 offset; + u32 act; + + if (!xdp_prog) + return false; + + pdev = bp->pdev; + txr = rxr->bnapi->tx_ring; + rx_buf = &rxr->rx_buf_ring[cons]; + offset = bp->rx_offset; + + xdp.data_hard_start = *data_ptr - offset; + xdp.data = *data_ptr; + xdp.data_end = *data_ptr + *len; + orig_data = xdp.data; + mapping = rx_buf->mapping - bp->rx_dma_offset; + + dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); + + rcu_read_lock(); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + rcu_read_unlock(); + + tx_avail = bnxt_tx_avail(bp, txr); + /* If the tx ring is not full, we must not update the rx producer yet + * because we may still be transmitting on some BDs. 
+ */ + if (tx_avail != bp->tx_ring_size) + *event &= ~BNXT_RX_EVENT; + + if (orig_data != xdp.data) { + offset = xdp.data - xdp.data_hard_start; + *data_ptr = xdp.data_hard_start + offset; + *len = xdp.data_end - xdp.data; + } + switch (act) { + case XDP_PASS: + return false; + + case XDP_TX: + if (tx_avail < 2) { + trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_reuse_rx_data(rxr, cons, page); + return true; + } + + *event = BNXT_TX_EVENT; + dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, + bp->rx_dir); + bnxt_xmit_xdp(bp, txr, mapping + offset, *len, + NEXT_RX(rxr->rx_prod)); + bnxt_reuse_rx_data(rxr, cons, page); + return true; + default: + bpf_warn_invalid_xdp_action(act); + /* Fall thru */ + case XDP_ABORTED: + trace_xdp_exception(bp->dev, xdp_prog, act); + /* Fall thru */ + case XDP_DROP: + bnxt_reuse_rx_data(rxr, cons, page); + break; + } + return true; +} + +/* Under rtnl_lock */ +static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) +{ + struct net_device *dev = bp->dev; + int tx_xdp = 0, rc, tc; + struct bpf_prog *old; + + if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n", + bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); + return -EOPNOTSUPP; + } + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { + netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n"); + return -EOPNOTSUPP; + } + if (prog) + tx_xdp = bp->rx_nr_rings; + + tc = netdev_get_num_tc(dev); + if (!tc) + tc = 1; + rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + tc, tx_xdp); + if (rc) { + netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n"); + return rc; + } + if (netif_running(dev)) + bnxt_close_nic(bp, true, false); + + old = xchg(&bp->xdp_prog, prog); + if (old) + bpf_prog_put(old); + + if (prog) { + bnxt_set_rx_skb_mode(bp, true); + } else { + int rx, tx; + + bnxt_set_rx_skb_mode(bp, false); + bnxt_get_max_rings(bp, &rx, &tx, true); + if (rx > 1) { + bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features |= NETIF_F_LRO; + } + } + bp->tx_nr_rings_xdp = tx_xdp; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; + bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + bnxt_set_tpa_flags(bp); + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, true, false); + + return 0; +} + +int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + switch (xdp->command) { + case XDP_SETUP_PROG: + rc = bnxt_xdp_set(bp, xdp->prog); + break; + case XDP_QUERY_PROG: + xdp->prog_attached = !!bp->xdp_prog; + rc = 0; + break; + default: + rc = -EINVAL; + break; + } + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h new file mode 100644 index 000000000000..b529f2c5355b --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -0,0 +1,19 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_XDP_H +#define BNXT_XDP_H + +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct page *page, u8 **data_ptr, unsigned int *len, + u8 *event); +int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp); + +#endif diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index b1d2ac818710..cec94bbb2ea5 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -3665,7 +3665,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk) static inline u16 cnic_get_vlan(struct net_device *dev, struct net_device **vlan_dev) { - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { *vlan_dev = vlan_dev_real_dev(dev); return vlan_dev_vlan_id(dev); } diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 435a2e4739d1..55c8e25b43d9 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget) sbdma_tx_process(sc, &(sc->sbm_txdma), 1); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | @@ -2617,7 +2617,7 @@ out_out: return err; } -static int __exit sbmac_remove(struct platform_device *pldev) +static int sbmac_remove(struct platform_device *pldev) { struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); @@ -2634,7 +2634,7 @@ static int __exit sbmac_remove(struct platform_device *pldev) static struct platform_driver sbmac_driver = { .probe = sbmac_probe, - .remove = __exit_p(sbmac_remove), + .remove = sbmac_remove, .driver = { .name = sbmac_string, }, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ae42de4fdddf..30d1eb9ebec9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -20,6 +20,7 @@ #include <linux/moduleparam.h> #include <linux/stringify.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/slab.h> @@ -14145,8 +14146,8 @@ static const struct ethtool_ops tg3_ethtool_ops = { .set_link_ksettings = tg3_set_link_ksettings, }; -static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tg3 *tp = netdev_priv(dev); @@ -14154,13 +14155,11 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, if (!tp->hw_stats) { *stats = tp->net_stats_prev; spin_unlock_bh(&tp->lock); - return stats; + return; } tg3_get_nstats(tp, stats); spin_unlock_bh(&tp->lock); - - return stats; } static void tg3_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 112030828c4b..6e13c937d715 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget) return rcvd; poll_exit: - napi_complete(napi); + napi_complete_done(napi, rcvd); rx_ctrl->rx_complete++; @@ -3111,7 +3111,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) * Used 
spin_lock to synchronize reading of stats structures, which * is written by BNA under the same lock. */ -static struct rtnl_link_stats64 * +static void bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct bnad *bnad = netdev_priv(netdev); @@ -3123,8 +3123,6 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) bnad_netdev_hwstats_fill(bnad, stats); spin_unlock_irqrestore(&bnad->bna_lock, flags); - - return stats; } static void @@ -3427,7 +3425,7 @@ static const struct net_device_ops bnad_netdev_ops = { .ndo_open = bnad_open, .ndo_stop = bnad_stop, .ndo_start_xmit = bnad_start_xmit, - .ndo_get_stats64 = bnad_get_stats64, + .ndo_get_stats64 = bnad_get_stats64, .ndo_set_rx_mode = bnad_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnad_set_mac_address, diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index baba2db9d9c2..30606b11b128 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1146,7 +1146,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* Packets received while interrupts were disabled */ status = macb_readl(bp, RSR); @@ -1622,7 +1622,7 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size) } } - netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", + netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", bp->dev->mtu, bp->rx_buffer_size); } @@ -2146,6 +2146,9 @@ static int macb_open(struct net_device *dev) netif_tx_start_all_queues(dev); + if (bp->ptp_info) + bp->ptp_info->ptp_init(dev); + return 0; } @@ -2167,6 +2170,9 @@ static int macb_close(struct net_device *dev) macb_free_consistent(bp); + if (bp->ptp_info) + bp->ptp_info->ptp_remove(dev); + return 0; } @@ -2440,6 +2446,17 @@ static int macb_set_ringparam(struct net_device *netdev, return 0; } +static int macb_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct macb *bp = netdev_priv(netdev); + + if (bp->ptp_info) + return bp->ptp_info->get_ts_info(netdev, info); + + return ethtool_op_get_ts_info(netdev, info); +} + static const struct ethtool_ops macb_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, @@ -2457,7 +2474,7 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = macb_get_ts_info, .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, @@ -2470,6 +2487,7 @@ static const struct ethtool_ops gem_ethtool_ops = { static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct phy_device *phydev = dev->phydev; + struct macb *bp = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; @@ -2477,7 +2495,17 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, rq, cmd); + if (!bp->ptp_info) + return phy_mii_ioctl(phydev, rq, cmd); + + switch (cmd) { + case SIOCSHWTSTAMP: + return bp->ptp_info->set_hwtst(dev, rq, cmd); + case SIOCGHWTSTAMP: + return bp->ptp_info->get_hwtst(dev, rq); + default: + return phy_mii_ioctl(phydev, rq, cmd); + } } static int macb_set_features(struct net_device *netdev, diff --git 
a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index fc8550a5d47f..234a49eaccfd 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -10,6 +10,8 @@ #ifndef _MACB_H #define _MACB_H +#include <linux/phy.h> + #define MACB_GREGS_NBR 16 #define MACB_GREGS_VERSION 2 #define MACB_MAX_QUEUES 8 @@ -131,6 +133,20 @@ #define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */ #define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */ #define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */ +#define GEM_TISUBN 0x01bc /* 1588 Timer Increment Sub-ns */ +#define GEM_TSH 0x01c0 /* 1588 Timer Seconds High */ +#define GEM_TSL 0x01d0 /* 1588 Timer Seconds Low */ +#define GEM_TN 0x01d4 /* 1588 Timer Nanoseconds */ +#define GEM_TA 0x01d8 /* 1588 Timer Adjust */ +#define GEM_TI 0x01dc /* 1588 Timer Increment */ +#define GEM_EFTSL 0x01e0 /* PTP Event Frame Tx Seconds Low */ +#define GEM_EFTN 0x01e4 /* PTP Event Frame Tx Nanoseconds */ +#define GEM_EFRSL 0x01e8 /* PTP Event Frame Rx Seconds Low */ +#define GEM_EFRN 0x01ec /* PTP Event Frame Rx Nanoseconds */ +#define GEM_PEFTSL 0x01f0 /* PTP Peer Event Frame Tx Secs Low */ +#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */ +#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */ +#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */ #define GEM_DCFG1 0x0280 /* Design Config 1 */ #define GEM_DCFG2 0x0284 /* Design Config 2 */ #define GEM_DCFG3 0x0288 /* Design Config 3 */ @@ -174,6 +190,7 @@ #define MACB_NCR_TPF_SIZE 1 #define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ #define MACB_TZQ_SIZE 1 +#define MACB_SRTSM_OFFSET 15 /* Bitfields in NCFGR */ #define MACB_SPD_OFFSET 0 /* Speed */ @@ -319,6 +336,32 @@ #define MACB_PTZ_SIZE 1 #define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ #define MACB_WOL_SIZE 1 +#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */ +#define MACB_DRQFR_SIZE 1 +#define MACB_SFR_OFFSET 19 /* PTP Sync Frame Received */ +#define MACB_SFR_SIZE 1 +#define MACB_DRQFT_OFFSET 20 /* PTP Delay Request Frame Transmitted */ +#define MACB_DRQFT_SIZE 1 +#define MACB_SFT_OFFSET 21 /* PTP Sync Frame Transmitted */ +#define MACB_SFT_SIZE 1 +#define MACB_PDRQFR_OFFSET 22 /* PDelay Request Frame Received */ +#define MACB_PDRQFR_SIZE 1 +#define MACB_PDRSFR_OFFSET 23 /* PDelay Response Frame Received */ +#define MACB_PDRSFR_SIZE 1 +#define MACB_PDRQFT_OFFSET 24 /* PDelay Request Frame Transmitted */ +#define MACB_PDRQFT_SIZE 1 +#define MACB_PDRSFT_OFFSET 25 /* PDelay Response Frame Transmitted */ +#define MACB_PDRSFT_SIZE 1 +#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ +#define MACB_SRI_SIZE 1 + +/* Timer increment fields */ +#define MACB_TI_CNS_OFFSET 0 +#define MACB_TI_CNS_SIZE 8 +#define MACB_TI_ACNS_OFFSET 8 +#define MACB_TI_ACNS_SIZE 8 +#define MACB_TI_NIT_OFFSET 16 +#define MACB_TI_NIT_SIZE 8 /* Bitfields in MAN */ #define MACB_DATA_OFFSET 0 /* data */ @@ -388,6 +431,17 @@ #define GEM_DAW64_OFFSET 23 #define GEM_DAW64_SIZE 1 +/* Bitfields in TISUBN */ +#define GEM_SUBNSINCR_OFFSET 0 +#define GEM_SUBNSINCR_SIZE 16 + +/* Bitfields in TI */ +#define GEM_NSINCR_OFFSET 0 +#define GEM_NSINCR_SIZE 8 + +/* Bitfields in ADJ */ +#define GEM_ADDSUB_OFFSET 31 +#define GEM_ADDSUB_SIZE 1 /* Constants for CLK */ #define MACB_CLK_DIV8 0 #define MACB_CLK_DIV16 1 @@ -415,6 +469,7 @@ #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_JUMBO 0x00000020 +#define 
MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -792,6 +847,20 @@ struct macb_or_gem_ops { int (*mog_rx)(struct macb *bp, int budget); }; +/* MACB-PTP interface: adapt to platform needs. */ +struct macb_ptp_info { + void (*ptp_init)(struct net_device *ndev); + void (*ptp_remove)(struct net_device *ndev); + s32 (*get_ptp_max_adj)(void); + unsigned int (*get_tsu_rate)(struct macb *bp); + int (*get_ts_info)(struct net_device *dev, + struct ethtool_ts_info *info); + int (*get_hwtst)(struct net_device *netdev, + struct ifreq *ifr); + int (*set_hwtst)(struct net_device *netdev, + struct ifreq *ifr, int cmd); +}; + struct macb_config { u32 caps; unsigned int dma_burst_length; @@ -885,6 +954,7 @@ struct macb { u32 wol; + struct macb_ptp_info *ptp_info; /* macb-ptp interface */ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT enum macb_hw_dma_cap hw_dma_cap; #endif @@ -895,4 +965,9 @@ static inline bool macb_is_gem(struct macb *bp) return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); } +static inline bool gem_has_ptp(struct macb *bp) +{ + return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP); +} + #endif /* _MACB_H */ diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index ce7de6f72512..2bd7c638b178 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget) work_done = xgmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } return work_done; @@ -1446,9 +1446,9 @@ static void xgmac_poll_controller(struct net_device *dev) } #endif -static struct rtnl_link_stats64 * +static void xgmac_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) + struct rtnl_link_stats64 *storage) { struct xgmac_priv *priv = netdev_priv(dev); void __iomem *base = priv->base; @@ -1476,7 +1476,6 @@ xgmac_get_stats64(struct net_device *dev, writel(0, base + XGMAC_MMC_CTRL); spin_unlock_bh(&priv->stats_lock); - return storage; } static int xgmac_set_mac_address(struct net_device *dev, void *p) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index b00c3002360e..50384cede8be 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -296,12 +296,16 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); - max_rx = CFG_GET_OQ_MAX_Q(conf23); - max_tx = CFG_GET_IQ_MAX_Q(conf23); - rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx); - tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx); + max_rx = oct->sriov_info.num_pf_rings; + max_tx = oct->sriov_info.num_pf_rings; + rx_count = lio->linfo.num_rxpciq; + tx_count = lio->linfo.num_txpciq; + } else if (OCTEON_CN23XX_VF(oct)) { + max_tx = oct->sriov_info.rings_per_vf; + max_rx = oct->sriov_info.rings_per_vf; + rx_count = lio->linfo.num_rxpciq; + tx_count = lio->linfo.num_txpciq; } channel->max_rx = max_rx; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 39a9665c9d00..be9c0e3f5ade 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -15,6 +15,7 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details. ***********************************************************************/ +#include <linux/module.h> #include <linux/pci.h> #include <linux/firmware.h> #include <net/vxlan.h> @@ -2223,25 +2224,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - u32 qindex = 0; - struct lio *lio; - - lio = GET_LIO(dev); - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. @@ -2263,6 +2245,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), struct skb_shared_hwtstamps *shhwtstamps; u64 ns; u16 vtag = 0; + u32 r_dh_off; struct net_device *netdev = (struct net_device *)arg; struct octeon_droq *droq = container_of(param, struct octeon_droq, napi); @@ -2308,6 +2291,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), put_page(pg_info->page); } + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + if (((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX)) && ptp_enable) { @@ -2320,16 +2305,27 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), /* Nanoseconds are in the first 64-bits * of the packet. */ - memcpy(&ns, (skb->data), sizeof(ns)); + memcpy(&ns, (skb->data + r_dh_off), + sizeof(ns)); + r_dh_off -= BYTES_PER_DHLEN_UNIT; shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); } - skb_pull(skb, sizeof(ns)); } } + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); if ((netdev->features & NETIF_F_RXCSUM) && (((rh->r_dh.encap_on) && @@ -2365,7 +2361,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, @@ -2441,7 +2436,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) iq = oct->instr_queue[iq_no]; if (iq) { /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, 1, budget); + tx_done = octeon_flush_iq(oct, iq, budget); /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. 
*/ @@ -2451,8 +2446,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq && iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; @@ -2629,7 +2628,9 @@ static int liquidio_open(struct net_device *netdev) oct->droq[0]->ops.poll_mode = 1; } - oct_ptp_open(netdev); + if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) && + ptp_enable) + oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); @@ -2677,13 +2678,7 @@ static int liquidio_stop(struct net_device *netdev) lio->linfo.link.s.link_up = 0; lio->link_changes++; - /* Pause for a moment and wait for Octeon to flush out (to the wire) any - * egress packets that are in-flight. - */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(100)); - - /* Now it should be safe to tell Octeon that nic interface is down. */ + /* Tell Octeon that nic interface is down. */ send_rx_ctrl_cmd(lio, 0); if (OCTEON_CN23XX_PF(oct)) { @@ -2973,9 +2968,13 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) */ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { + struct lio *lio = GET_LIO(netdev); + switch (cmd) { case SIOCSHWTSTAMP: - return hwtstamp_ioctl(netdev, ifr); + if ((lio->oct_dev->chip_id == OCTEON_CN66XX || + lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable) + return hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; } @@ -3322,11 +3321,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; @@ -3741,7 +3740,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, .ndo_set_vf_link_state = liquidio_set_vf_link_state, - .ndo_select_queue = select_q }; /** \brief Entry point for the liquidio module diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 70d96c10c673..9d5e03502c76 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -15,6 +15,7 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details. 
***********************************************************************/ +#include <linux/module.h> #include <linux/pci.h> #include <net/vxlan.h> #include "liquidio_common.h" @@ -1455,26 +1456,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - struct lio *lio; - u32 qindex; - - lio = GET_LIO(dev); - - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. @@ -1497,6 +1478,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), struct net_device *netdev = (struct net_device *)arg; struct sk_buff *skb = (struct sk_buff *)skbuff; u16 vtag = 0; + u32 r_dh_off; if (netdev) { struct lio *lio = GET_LIO(netdev); @@ -1540,7 +1522,20 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), put_page(pg_info->page); } - skb_pull(skb, rh->r_dh.len * 8); + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + + if (rh->r_dh.has_hwtstamp) + r_dh_off -= BYTES_PER_DHLEN_UNIT; + + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); skb->protocol = eth_type_trans(skb, skb->dev); if ((netdev->features & NETIF_F_RXCSUM) && @@ -1577,7 +1572,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, @@ -1627,7 +1621,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) iq = oct->instr_queue[iq_no]; if (iq) { /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, 1, budget); + tx_done = octeon_flush_iq(oct, iq, budget); /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. 
*/ @@ -1637,8 +1631,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq && iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; @@ -2440,11 +2438,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; @@ -2703,7 +2701,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_features = liquidio_set_features, .ndo_udp_tunnel_add = liquidio_add_vxlan_port, .ndo_udp_tunnel_del = liquidio_del_vxlan_port, - .ndo_select_queue = select_q, }; static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index ba329f6ca779..294c6f3c6b48 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -98,6 +98,9 @@ enum octeon_tag_type { #define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2) #define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1) +#define BYTES_PER_DHLEN_UNIT 8 +#define MAX_REG_CNT 2000000U + static inline u32 incr_index(u32 index, u32 count, u32 max) { if ((index + count) >= max) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index 1cb3514fc949..b3dc2e9651a8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -429,15 +429,11 @@ struct octeon_config { /* The following config values are fixed and should not be modified. */ -/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */ -#define MAX_BAR1_MAP_INDEX 2 +#define BAR1_INDEX_DYNAMIC_MAP 2 +#define BAR1_INDEX_STATIC_MAP 15 #define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024) -/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access. - * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access. - */ -#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \ - OCTEON_BAR1_ENTRY_SIZE) +#define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE) /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking * NoResponse Lists are now maintained with each IQ. (Dec' 2007). 
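The octeon_config.h hunk above drops the old MAX_BAR1_MAP_INDEX scheme and instead names BAR1 index 2 as the dynamically remapped window and index 15 as a static one, with the ioremap span growing to cover all 16 windows of OCTEON_BAR1_ENTRY_SIZE (4 MB) each. A minimal sketch of the window/offset arithmetic these constants imply follows; it is illustrative only, and bar1_window_of() is a hypothetical standalone helper, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define OCTEON_BAR1_ENTRY_SIZE	(4 * 1024 * 1024)
#define BAR1_INDEX_DYNAMIC_MAP	2
#define BAR1_INDEX_STATIC_MAP	15
#define MAX_BAR1_IOREMAP_SIZE	(16 * OCTEON_BAR1_ENTRY_SIZE)

/* Split a 64-bit Octeon core address into the 4 MB-aligned base that a BAR1
 * index register would have to point at and the byte offset within that
 * window, mirroring the masking used elsewhere in this series.
 */
static void bar1_window_of(uint64_t core_addr, uint64_t *window_base,
			   uint32_t *offset)
{
	*window_base = core_addr & ~(uint64_t)(OCTEON_BAR1_ENTRY_SIZE - 1);
	*offset = (uint32_t)(core_addr & (OCTEON_BAR1_ENTRY_SIZE - 1));
}

int main(void)
{
	uint64_t base;
	uint32_t off;

	bar1_window_of(0x20001234ULL, &base, &off);	/* arbitrary example address */
	printf("window base 0x%llx, offset 0x%x, total ioremap %u MB\n",
	       (unsigned long long)base, off,
	       (unsigned)(MAX_BAR1_IOREMAP_SIZE >> 20));
	return 0;
}

With the static window (index 15) kept pointed at one fixed DRAM region, repeated accesses such as PCI console reads can skip reprogramming the dynamic window (index 2) on every transfer, which is what the octeon_console.c and octeon_mem_ops.c changes below rely on.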
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 3265e0b7923e..53f38d05f7c2 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -18,6 +18,7 @@ /** * @file octeon_console.c */ +#include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/crc32.h> @@ -549,6 +550,16 @@ int octeon_init_consoles(struct octeon_device *oct) return ret; } + /* Dedicate one of Octeon's BAR1 index registers to create a static + * mapping to a region of Octeon DRAM that contains the PCI console + * named block. + */ + oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP; + oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index, + true); + oct->console_nb_info.dram_region_base = addr + & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL); + /* num_consoles > 0, is an indication that the consoles * are accessible */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index a8df493a5012..9675ffbf25e6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&droq->lock); writel(droq->pkt_count, droq->pkts_sent_reg); droq->pkt_count = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&droq->lock); oct = droq->oct_dev; } @@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&iq->lock); writel(iq->pkt_in_done, iq->inst_cnt_reg); iq->pkt_in_done = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&iq->lock); oct = iq->oct_dev; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 18f6836250a6..c301a3852482 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -477,6 +477,12 @@ struct octeon_device { /* Console caches */ struct octeon_console console[MAX_OCTEON_MAPS]; + /* Console named block info */ + struct { + u64 dram_region_base; + int bar1_index; + } console_nb_info; + /* Coprocessor clock rate. 
*/ u64 coproc_clock_rate; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index e04ca8f0b4a7..4608a5af35a3 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -369,5 +369,5 @@ int octeon_setup_iq(struct octeon_device *oct, int ifidx, void *app_ctx); int octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh, u32 napi_budget); + u32 napi_budget); #endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 73696b427f06..201b9875f9bb 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct, { struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS; + long timeout = LIO_MBOX_WRITE_WAIT_TIME; unsigned long flags; spin_lock_irqsave(&mbox->lock, flags); @@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct, count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) { - schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; @@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct, count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFACK) { - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h index fe60a3e6247b..c9376fe075bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -31,8 +31,8 @@ #define OCTEON_PFVFSIG 0x1122334455667788 #define OCTEON_PFVFERR 0xDEADDEADDEADDEAD -#define LIO_MBOX_WRITE_WAIT_CNT 1000 -#define LIO_MBOX_WRITE_WAIT_TIME 10 +#define LIO_MBOX_WRITE_WAIT_CNT 1000 +#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1) enum octeon_mbox_cmd_status { OCTEON_MBOX_STATUS_SUCCESS = 0, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 8cd389148166..aa36e9ae7676 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -23,6 +23,8 @@ #ifndef _OCTEON_MAIN_H_ #define _OCTEON_MAIN_H_ +#include <linux/sched/signal.h> + #if BITS_PER_LONG == 32 #define CVM_CAST64(v) ((long long)(v)) #elif BITS_PER_LONG == 64 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c index 13a18c9a7a51..5cd96e7d426c 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -23,7 +23,7 @@ #include "response_manager.h" #include "octeon_device.h" -#define MEMOPS_IDX MAX_BAR1_MAP_INDEX +#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP #ifdef __BIG_ENDIAN_BITFIELD static inline void @@ -96,6 +96,25 @@ __octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr, u32 copy_len = 0, index_reg_val = 0; unsigned long flags; u8 __iomem *mapped_addr; + u64 static_mapping_base; + + static_mapping_base = 
oct->console_nb_info.dram_region_base; + + if (static_mapping_base && + static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) { + int bar1_index = oct->console_nb_info.bar1_index; + + mapped_addr = oct->mmio[1].hw_addr + + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE)) + + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL)); + + if (op) + octeon_pci_fastread(oct, mapped_addr, hostbuf, len); + else + octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len); + + return; + } spin_lock_irqsave(&oct->mem_access_lock, flags); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index c3d6a8228362..0243be8dd56f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -49,7 +49,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, /* Add in the response related fields. Opcode and Param are already * there. */ - if (OCTEON_CN23XX_PF(oct)) { + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; @@ -70,7 +70,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, *sc->status_word = COMPLETION_WORD_INIT; - if (OCTEON_CN23XX_PF(oct)) + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) sc->cmd.cmd3.rptr = sc->dmarptr; else sc->cmd.cmd2.rptr = sc->dmarptr; diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 3ce66759e80a..707bc15adec6 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -455,7 +455,7 @@ lio_process_iq_request_list(struct octeon_device *oct, /* Can only be called from process context */ int octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh, u32 napi_budget) + u32 napi_budget) { u32 inst_processed = 0; u32 tot_inst_processed = 0; @@ -468,33 +468,32 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); - if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { - do { - /* Process any outstanding IQ packets. */ - if (iq->flush_index == iq->octeon_read_index) - break; - - if (napi_budget) - inst_processed = lio_process_iq_request_list - (oct, iq, - napi_budget - tot_inst_processed); - else - inst_processed = - lio_process_iq_request_list(oct, iq, 0); + do { + /* Process any outstanding IQ packets. 
*/ + if (iq->flush_index == iq->octeon_read_index) + break; - if (inst_processed) { - atomic_sub(inst_processed, &iq->instr_pending); - iq->stats.instr_processed += inst_processed; - } + if (napi_budget) + inst_processed = + lio_process_iq_request_list(oct, iq, + napi_budget - + tot_inst_processed); + else + inst_processed = + lio_process_iq_request_list(oct, iq, 0); + + if (inst_processed) { + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; + } - tot_inst_processed += inst_processed; - inst_processed = 0; + tot_inst_processed += inst_processed; + inst_processed = 0; - } while (tot_inst_processed < napi_budget); + } while (tot_inst_processed < napi_budget); - if (napi_budget && (tot_inst_processed >= napi_budget)) - tx_done = 0; - } + if (napi_budget && (tot_inst_processed >= napi_budget)) + tx_done = 0; iq->last_db_time = jiffies; @@ -530,7 +529,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no) iq->last_db_time = jiffies; /* Flush the instruction queue */ - octeon_flush_iq(oct, iq, 1, 0); + octeon_flush_iq(oct, iq, 0); lio_enable_irq(NULL, iq); } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 21f80f5744ba..a2138686c605 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* We stopped because no more packets were available. */ - napi_complete(napi); + napi_complete_done(napi, work_done); octeon_mgmt_enable_rx_irq(p); } octeon_mgmt_update_rx_stats(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 2e74bbaa38e1..02a986cdbb39 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev, struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; - ring->rx_max_pending = MAX_RCV_BUF_COUNT; - ring->rx_pending = qs->rbdr_len; + ring->rx_max_pending = MAX_CMP_QUEUE_LEN; + ring->rx_pending = qs->cq_len; ring->tx_max_pending = MAX_SND_QUEUE_LEN; ring->tx_pending = qs->sq_len; } +static int nicvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + u32 rx_count, tx_count; + + /* Due to HW errata this is not supported on T88 pass 1.x silicon */ + if (pass1_silicon(nic->pdev)) + return -EINVAL; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + tx_count = clamp_t(u32, ring->tx_pending, + MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN); + rx_count = clamp_t(u32, ring->rx_pending, + MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN); + + if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len)) + return 0; + + /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */ + qs->sq_len = rounddown_pow_of_two(tx_count); + qs->cq_len = rounddown_pow_of_two(rx_count); + + if (netif_running(netdev)) { + nicvf_stop(netdev); + nicvf_open(netdev); + } + + return 0; +} + static int nicvf_get_rss_hash_opts(struct nicvf *nic, struct ethtool_rxnfc *info) { @@ -635,7 +669,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, } static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *hkey, u8 hfunc) + const u8 
*hkey, const u8 hfunc) { struct nicvf *nic = netdev_priv(dev); struct nicvf_rss_info *rss = &nic->rss_info; @@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .get_regs = nicvf_get_regs, .get_coalesce = nicvf_get_coalesce, .get_ringparam = nicvf_get_ringparam, + .set_ringparam = nicvf_set_ringparam, .get_rxnfc = nicvf_get_rxnfc, .set_rxnfc = nicvf_set_rxnfc, .get_rxfh_key_size = nicvf_get_rxfh_key_size, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 2006f58b14b1..6feaa24bcfd4 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* Slow packet rate, exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); /* Re-enable interrupts */ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->cq_idx); @@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev) /* Configure receive side scaling and MTU */ if (!nic->sqs_mode) { nicvf_rss_init(nic); - if (nicvf_update_hw_max_frs(nic, netdev->mtu)) + err = nicvf_update_hw_max_frs(nic, netdev->mtu); + if (err) goto cleanup; /* Clear percpu stats */ @@ -1461,8 +1462,8 @@ void nicvf_update_stats(struct nicvf *nic) nicvf_update_sq_stats(nic, qidx); } -static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void nicvf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct nicvf *nic = netdev_priv(netdev); struct nicvf_hw_stats *hw_stats = &nic->hw_stats; @@ -1478,7 +1479,6 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, stats->tx_packets = hw_stats->tx_frames; stats->tx_dropped = hw_stats->tx_drops; - return stats; } static void nicvf_tx_timeout(struct net_device *dev) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d2ac133e36f1..ac0390be3b12 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; - cq_cfg.qsize = CMP_QSIZE; + cq_cfg.qsize = ilog2(qs->cq_len >> 10); cq_cfg.avg_con = 0; nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); @@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; - sq_cfg.qsize = SND_QSIZE; + sq_cfg.qsize = ilog2(qs->sq_len >> 10); sq_cfg.tstmp_bgx_intf = 0; - sq_cfg.cq_limit = 0; + /* CQ's level at which HW will stop processing SQEs to avoid + * transmitting a pkt with no space in CQ to post CQE_TX. + */ + sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); /* Set threshold value for interrupt generation */ @@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable) { bool disable = false; struct queue_set *qs = nic->qs; + struct queue_set *pqs = nic->pnicvf->qs; int qidx; if (!qs) return 0; + /* Take primary VF's queue lengths. + * This is needed to take queue lengths set from ethtool + * into consideration. 
+ */ + if (nic->sqs_mode && pqs) { + qs->cq_len = pqs->cq_len; + qs->sq_len = pqs->sq_len; + } + if (enable) { if (nicvf_alloc_resources(nic)) return -ENOMEM; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 9e2104675bc9..5cb84da99a2d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -59,8 +59,9 @@ /* Default queue count per QS, its lengths and threshold values */ #define DEFAULT_RBDR_CNT 1 -#define SND_QSIZE SND_QUEUE_SIZE2 +#define SND_QSIZE SND_QUEUE_SIZE0 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) +#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10)) #define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) #define SND_QUEUE_THRESH 2ULL #define MIN_SQ_DESC_PER_PKT_XMIT 2 @@ -70,11 +71,18 @@ /* Keep CQ and SQ sizes same, if timestamping * is enabled this equation will change. */ -#define CMP_QSIZE CMP_QUEUE_SIZE2 +#define CMP_QSIZE CMP_QUEUE_SIZE0 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) +#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10)) +#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10)) #define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2) #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ +/* No of CQEs that might anyway gets used by HW due to pipelining + * effects irrespective of PASS/DROP/LEVELS being configured + */ +#define CMP_QUEUE_PIPELINE_RSVD 544 + #define RBDR_SIZE RBDR_SIZE0 #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) @@ -93,8 +101,8 @@ * RED accepts pkt if unused CQE < 2304 & >= 2560 * DROPs pkts if unused CQE < 2304 */ -#define RQ_PASS_CQ_LVL 160ULL -#define RQ_DROP_CQ_LVL 144ULL +#define RQ_PASS_CQ_LVL 192ULL +#define RQ_DROP_CQ_LVL 184ULL /* RED and Backpressure levels of RBDR for pkt reception * For RBDR, level is a measure of fullness i.e 0x0 means empty diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 1e4695270da6..4c8e8cf730bb 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -978,17 +978,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) struct device *dev = &bgx->pdev->dev; struct lmac *lmac; char str[20]; - u8 dlm; - if (lmacid > bgx->max_lmac) + if (!bgx->is_dlm && lmacid) return; lmac = &bgx->lmac[lmacid]; - dlm = (lmacid / 2) + (bgx->bgx_id * 2); if (!bgx->is_dlm) sprintf(str, "BGX%d QLM mode", bgx->bgx_id); else - sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm); + sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid); switch (lmac->lmac_type) { case BGX_MODE_SGMII: @@ -1074,7 +1072,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) { struct lmac *lmac; - struct lmac *olmac; u64 cmr_cfg; u8 lmac_type; u8 lane_to_sds; @@ -1094,62 +1091,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) return; } - /* On 81xx BGX can be split across 2 DLMs - * firmware programs lmac_type of LMAC0 and LMAC2 + /* For DLMs or SLMs on 80/81/83xx so many lane configurations + * are possible and vary across boards. Also Kernel doesn't have + * any way to identify board type/info and since firmware does, + * just take lmac type and serdes lane config as is. 
*/ - if ((idx == 0) || (idx == 2)) { - cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - /* Check if config is not reset value */ - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) - lmac->lmac_type = BGX_MODE_INVALID; - else - lmac->lmac_type = lmac_type; - lmac_set_training(bgx, lmac, lmac->lmacid); - lmac_set_lane2sds(bgx, lmac); - - olmac = &bgx->lmac[idx + 1]; - /* Check if other LMAC on the same DLM is already configured by - * firmware, if so use the same config or else set as same, as - * that of LMAC 0/2. - * This check is needed as on 80xx only one lane of each of the - * DLM of BGX0 is used, so have to rely on firmware for - * distingushing 80xx from 81xx. - */ - cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) { - olmac->lmac_type = lmac->lmac_type; - lmac_set_lane2sds(bgx, olmac); - } else { - olmac->lmac_type = lmac_type; - olmac->lane_to_sds = lane_to_sds; - } - lmac_set_training(bgx, olmac, olmac->lmacid); - } -} - -static bool is_dlm0_in_bgx_mode(struct bgx *bgx) -{ - struct lmac *lmac; - - if (!bgx->is_dlm) - return true; - - lmac = &bgx->lmac[0]; - if (lmac->lmac_type == BGX_MODE_INVALID) - return false; - - return true; + cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); + lmac_type = (u8)((cmr_cfg >> 8) & 0x07); + lane_to_sds = (u8)(cmr_cfg & 0xFF); + /* Check if config is reset value */ + if ((lmac_type == 0) && (lane_to_sds == 0xE4)) + lmac->lmac_type = BGX_MODE_INVALID; + else + lmac->lmac_type = lmac_type; + lmac->lane_to_sds = lane_to_sds; + lmac_set_training(bgx, lmac, lmac->lmacid); } static void bgx_get_qlm_mode(struct bgx *bgx) { struct lmac *lmac; - struct lmac *lmac01; - struct lmac *lmac23; u8 idx; /* Init all LMAC's type to invalid */ @@ -1165,29 +1126,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx) if (bgx->lmac_count > bgx->max_lmac) bgx->lmac_count = bgx->max_lmac; - for (idx = 0; idx < bgx->max_lmac; idx++) - bgx_set_lmac_config(bgx, idx); - - if (!bgx->is_dlm || bgx->is_rgx) { - bgx_print_qlm_mode(bgx, 0); - return; - } - - if (bgx->lmac_count) { - bgx_print_qlm_mode(bgx, 0); - bgx_print_qlm_mode(bgx, 2); - } - - /* If DLM0 is not in BGX mode then LMAC0/1 have - * to be configured with serdes lanes of DLM1 - */ - if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2)) - return; for (idx = 0; idx < bgx->lmac_count; idx++) { - lmac01 = &bgx->lmac[idx]; - lmac23 = &bgx->lmac[idx + 2]; - lmac01->lmac_type = lmac23->lmac_type; - lmac01->lane_to_sds = lmac23->lane_to_sds; + bgx_set_lmac_config(bgx, idx); + bgx_print_qlm_mode(bgx, idx); } } diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 86f467a2c485..d56142b98534 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, int budget) int work_done = process_responses(adapter, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 5f226eda8cd6..52063587e1e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -351,7 +351,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct 
dst_entry *dst, e->smt_idx = smt_idx; atomic_set(&e->refcnt, 1); neigh_replace(e, neigh); - if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(neigh->dev)) e->vlan = vlan_dev_vlan_id(neigh->dev); else e->vlan = VLAN_NONE; diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e4b5b057f417..1b9d154f1149 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct *napi, int budget) __skb_queue_head_init(&queue); skb_queue_splice_init(&q->rx_queue, &queue); if (skb_queue_empty(&queue)) { - napi_complete(napi); + napi_complete_done(napi, work_done); spin_unlock_irq(&q->lock); return work_done; } @@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) int work_done = process_responses(adap, qs, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* * Because we don't atomically flush the following diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0bce1bf9ca0f..163543b1ea0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -263,6 +263,11 @@ struct tp_params { u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + /* cached TP_OUT_CONFIG compressed error vector + * and passing outer header info for encapsulated packets. + */ + int rx_pkt_encap; + /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a * subset of the set of fields which may be present in the Compressed * Filter Tuple portion of filters and TCP TCB connections. The @@ -581,22 +586,6 @@ struct sge_rspq { /* state for an SGE response queue */ rspq_handler_t handler; rspq_flush_handler_t flush_handler; struct t4_lro_mgr lro_mgr; -#ifdef CONFIG_NET_RX_BUSY_POLL -#define CXGB_POLL_STATE_IDLE 0 -#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */ -#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */ -#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */ -#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */ -#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \ - CXGB_POLL_STATE_POLL_YIELD) -#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \ - CXGB_POLL_STATE_POLL) -#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \ - CXGB_POLL_STATE_POLL_YIELD) - unsigned int bpoll_state; - spinlock_t bpoll_lock; /* lock for busy poll */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ - }; struct sge_eth_stats { /* Ethernet queue statistics */ @@ -782,6 +771,10 @@ struct vf_info { bool pf_set_mac; }; +struct mbox_list { + struct list_head list; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -844,6 +837,10 @@ struct adapter { struct work_struct db_drop_task; bool tid_release_task_busy; + /* lock for mailbox cmd list */ + spinlock_t mbox_lock; + struct mbox_list mlist; + /* support for mailbox command/reply logging */ #define T4_OS_LOG_MBOX_CMDS 256 struct mbox_cmd_log *mbox_log; @@ -1160,102 +1157,6 @@ static inline struct adapter *netdev2adap(const struct net_device *dev) return netdev2pinfo(dev)->adapter; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) -{ - spin_lock_init(&q->bpoll_lock); - q->bpoll_state = CXGB_POLL_STATE_IDLE; -} - -static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) -{ - bool rc = true; - - 
spin_lock(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_LOCKED) { - q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD; - rc = false; - } else { - q->bpoll_state = CXGB_POLL_STATE_NAPI; - } - spin_unlock(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) -{ - bool rc = false; - - spin_lock(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) - rc = true; - q->bpoll_state = CXGB_POLL_STATE_IDLE; - spin_unlock(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) -{ - bool rc = true; - - spin_lock_bh(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_LOCKED) { - q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD; - rc = false; - } else { - q->bpoll_state |= CXGB_POLL_STATE_POLL; - } - spin_unlock_bh(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) -{ - bool rc = false; - - spin_lock_bh(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) - rc = true; - q->bpoll_state = CXGB_POLL_STATE_IDLE; - spin_unlock_bh(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) -{ - return q->bpoll_state & CXGB_POLL_USER_PEND; -} -#else -static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) -{ -} - -static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) -{ - return true; -} - -static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* Return a version number to identify the type of adapter. The scheme is: * - bits 0..9: chip version * - bits 10..15: chip revision @@ -1312,7 +1213,6 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); -int cxgb_busy_poll(struct napi_struct *napi); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); extern int dbfifo_int_thresh; @@ -1488,6 +1388,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); +int t4_shutdown_adapter(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4_bar2_sge_qregs(struct adapter *adapter, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index acc231293e4d..f6e739da7bb7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1416,7 +1416,7 @@ static unsigned int xdigit2int(unsigned char c) * <pattern data>[/<pattern mask>][@<anchor>] * * Up to 2 filter patterns can be specified. If 2 are supplied the first one - * must be anchored at 0. An omited mask is taken as a mask of 1s, an omitted + * must be anchored at 0. An omitted mask is taken as a mask of 1s, an omitted * anchor is taken as 0. 
*/ static ssize_t mps_trc_write(struct file *file, const char __user *buf, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6f951877430b..afb0967d2ce6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -188,18 +188,24 @@ static void link_report(struct net_device *dev) const struct port_info *p = netdev_priv(dev); switch (p->link_cfg.speed) { - case 10000: - s = "10Gbps"; + case 100: + s = "100Mbps"; break; case 1000: - s = "1000Mbps"; + s = "1Gbps"; break; - case 100: - s = "100Mbps"; + case 10000: + s = "10Gbps"; + break; + case 25000: + s = "25Gbps"; break; case 40000: s = "40Gbps"; break; + case 100000: + s = "100Gbps"; + break; default: pr_info("%s: unsupported speed: %d\n", dev->name, p->link_cfg.speed); @@ -738,14 +744,8 @@ static void quiesce_rx(struct adapter *adap) for (i = 0; i < adap->sge.ingr_sz; i++) { struct sge_rspq *q = adap->sge.ingr_map[i]; - if (q && q->handler) { + if (q && q->handler) napi_disable(&q->napi); - local_bh_disable(); - while (!cxgb_poll_lock_napi(q)) - mdelay(1); - local_bh_enable(); - } - } } @@ -776,10 +776,9 @@ static void enable_rx(struct adapter *adap) if (!q) continue; - if (q->handler) { - cxgb_busy_poll_init_lock(q); + if (q->handler) napi_enable(&q->napi); - } + /* 0-increment GTS to start the timer and enable interrupts */ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), SEINTARM_V(q->intr_params) | @@ -1806,7 +1805,7 @@ static void check_neigh_update(struct neighbour *neigh) const struct device *parent; const struct net_device *netdev = neigh->dev; - if (netdev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(netdev)) netdev = vlan_dev_real_dev(netdev); parent = netdev->dev.parent; if (parent && parent->driver == &cxgb4_driver.driver) @@ -2112,7 +2111,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, #if IS_ENABLED(CONFIG_BONDING) struct adapter *adap; #endif - if (event_dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(event_dev)) event_dev = vlan_dev_real_dev(event_dev); #if IS_ENABLED(CONFIG_BONDING) if (event_dev->flags & IFF_MASTER) { @@ -2369,8 +2368,8 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, } EXPORT_SYMBOL(cxgb4_remove_server_filter); -static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *ns) +static void cxgb_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *ns) { struct port_stats stats; struct port_info *p = netdev_priv(dev); @@ -2383,7 +2382,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, spin_lock(&adapter->stats_lock); if (!netif_device_present(dev)) { spin_unlock(&adapter->stats_lock); - return ns; + return; } t4_get_port_stats_offset(adapter, p->tx_chan, &stats, &p->stats_base); @@ -2401,7 +2400,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, ns->rx_over_errors = 0; ns->rx_crc_errors = stats.rx_fcs_err; ns->rx_frame_errors = stats.rx_symbol_err; - ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + + ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + stats.rx_ovflow2 + stats.rx_ovflow3 + stats.rx_trunc0 + stats.rx_trunc1 + stats.rx_trunc2 + stats.rx_trunc3; @@ -2417,7 +2416,6 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, ns->tx_errors = stats.tx_error_frames; ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; - return 
ns; } static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) @@ -2578,6 +2576,19 @@ static int cxgb_get_vf_config(struct net_device *dev, ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); return 0; } + +static int cxgb_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct port_info *pi = netdev_priv(dev); + unsigned int phy_port_id; + + phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; + ppid->id_len = sizeof(phy_port_id); + memcpy(ppid->id, &phy_port_id, ppid->id_len); + return 0; +} + #endif static int cxgb_set_mac_addr(struct net_device *dev, void *p) @@ -2745,9 +2756,6 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_fcoe_enable = cxgb_fcoe_enable, .ndo_fcoe_disable = cxgb_fcoe_disable, #endif /* CONFIG_CHELSIO_T4_FCOE */ -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = cxgb_busy_poll, -#endif .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, }; @@ -2757,6 +2765,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = { .ndo_open = dummy_open, .ndo_set_vf_mac = cxgb_set_vf_mac, .ndo_get_vf_config = cxgb_get_vf_config, + .ndo_get_phys_port_id = cxgb_get_phys_port_id, }; #endif @@ -2777,8 +2786,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { void t4_fatal_err(struct adapter *adap) { - t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0); - t4_intr_disable(adap); + int port; + + /* Disable the SGE since ULDs are going to free resources that + * could be exposed to the adapter. RDMA MWs for example... + */ + t4_shutdown_adapter(adap); + for_each_port(adap, port) { + struct net_device *dev = adap->port[port]; + + /* If we get here in very early initialization the network + * devices may not have been set up yet. + */ + if (!dev) + continue; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + } dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); } @@ -4397,9 +4422,9 @@ static void print_port_info(const struct net_device *dev) spd = " 8 GT/s"; if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) - bufp += sprintf(bufp, "100/"); + bufp += sprintf(bufp, "100M/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) - bufp += sprintf(bufp, "1000/"); + bufp += sprintf(bufp, "1G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) bufp += sprintf(bufp, "10G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) @@ -4511,12 +4536,14 @@ static int config_mgmt_dev(struct pci_dev *pdev) int err; snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); - netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup); + netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN, + dummy_setup); if (!netdev) return -ENOMEM; pi = netdev_priv(netdev); pi->adapter = adap; + pi->port_id = adap->pf % adap->params.nports; SET_NETDEV_DEV(netdev, &pdev->dev); adap->port[0] = netdev; @@ -4606,6 +4633,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 whoami, pl_rev; enum chip_type chip; static int adap_idx = 1; +#ifdef CONFIG_PCI_IOV + u32 v, port_vec; +#endif printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -4707,6 +4737,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); spin_lock_init(&adapter->win0_lock); + spin_lock_init(&adapter->mbox_lock); + + INIT_LIST_HEAD(&adapter->mlist.list); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); 
INIT_WORK(&adapter->db_full_task, process_db_full); @@ -4874,8 +4907,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "continuing\n"); adapter->params.offload = 0; } else { - adapter->tc_u32 = cxgb4_init_tc_u32(adapter, - CXGB4_MAX_LINK_HANDLE); + adapter->tc_u32 = cxgb4_init_tc_u32(adapter); if (!adapter->tc_u32) dev_warn(&pdev->dev, "could not offload tc u32, continuing\n"); @@ -4982,6 +5014,19 @@ sriov: err = -ENOMEM; goto free_adapter; } + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); + + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, + &v, &port_vec); + if (err < 0) { + dev_err(adapter->pdev_dev, "Could not fetch port params\n"); + goto free_adapter; + } + + adapter->params.nports = hweight32(port_vec); pci_set_drvdata(pdev, adapter); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 52af62e0ecb6..a1b19422b339 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -437,28 +437,26 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap) t4_free_mem(adap->tc_u32); } -struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap, - unsigned int size) +struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) { + unsigned int max_tids = adap->tids.nftids; struct cxgb4_tc_u32_table *t; unsigned int i; - if (!size) + if (!max_tids) return NULL; t = t4_alloc_mem(sizeof(*t) + - (size * sizeof(struct cxgb4_link))); + (max_tids * sizeof(struct cxgb4_link))); if (!t) return NULL; - t->size = size; + t->size = max_tids; for (i = 0; i < t->size; i++) { struct cxgb4_link *link = &t->table[i]; unsigned int bmap_size; - unsigned int max_tids; - max_tids = adap->tids.nftids; bmap_size = BITS_TO_LONGS(max_tids); link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size); if (!link->tid_map) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h index 6bdc885eff22..021261a41c13 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h @@ -37,8 +37,6 @@ #include <net/pkt_cls.h> -#define CXGB4_MAX_LINK_HANDLE 32 - static inline bool can_tc_u32_offload(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); @@ -52,6 +50,5 @@ int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, struct tc_cls_u32_offload *cls); void cxgb4_cleanup_tc_u32(struct adapter *adapter); -struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap, - unsigned int size); +struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap); #endif /* __CXGB4_TC_U32_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 8098902c094a..d0868c2320da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -408,10 +408,9 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q) if (!q) return; - if (q->handler) { - cxgb_busy_poll_init_lock(q); + if (q->handler) napi_enable(&q->napi); - } + /* 0-increment GTS to start the timer and enable interrupts */ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), SEINTARM_V(q->intr_params) | @@ -420,13 +419,8 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q) static void quiesce_rx(struct 
adapter *adap, struct sge_rspq *q) { - if (q && q->handler) { + if (q && q->handler) napi_disable(&q->napi); - local_bh_disable(); - while (!cxgb_poll_lock_napi(q)) - mdelay(1); - local_bh_enable(); - } } static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) @@ -597,7 +591,6 @@ void t4_uld_mem_free(struct adapter *adap) void t4_uld_clean_up(struct adapter *adap) { - struct sge_uld_rxq_info *rxq_info; unsigned int i; if (!adap->uld) @@ -605,7 +598,6 @@ void t4_uld_clean_up(struct adapter *adap) for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) continue; - rxq_info = adap->sge.uld_rxq_info[i]; if (adap->flags & FULL_INIT_DONE) quiesce_rx_uld(adap, i); if (adap->flags & USING_MSIX) diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 60a26037a1c6..7c8c5b9a3c22 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -432,7 +432,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, else lport = netdev2pinfo(physdev)->lport; - if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(neigh->dev)) vlan = vlan_dev_vlan_id(neigh->dev); else vlan = VLAN_NONE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index cbd68a8fe2e4..c9026352a842 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, struct ch_sched_params info; struct ch_sched_params tp; - memset(&info, 0, sizeof(info)); - memset(&tp, 0, sizeof(tp)); - memcpy(&tp, p, sizeof(tp)); /* Don't try to match class parameter */ tp.u.params.class = SCHED_CLS_NONE; @@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, if (e->state == SCHED_STATE_UNUSED) continue; - memset(&info, 0, sizeof(info)); memcpy(&info, &e->info, sizeof(info)); /* Don't try to match class parameter */ info.u.params.class = SCHED_CLS_NONE; @@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (!e) goto out; - memset(&np, 0, sizeof(np)); memcpy(&np, p, sizeof(np)); np.u.params.class = e->idx; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 9f606478c29c..f05f0d400324 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -43,9 +43,7 @@ #include <linux/export.h> #include <net/ipv6.h> #include <net/tcp.h> -#ifdef CONFIG_NET_RX_BUSY_POLL #include <net/busy_poll.h> -#endif /* CONFIG_NET_RX_BUSY_POLL */ #ifdef CONFIG_CHELSIO_T4_FCOE #include <scsi/fc/fc_fcoe.h> #endif /* CONFIG_CHELSIO_T4_FCOE */ @@ -1774,15 +1772,20 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, struct sge_uld_txq *txq; unsigned int idx = skb_txq(skb); - txq_info = adap->sge.uld_txq_info[tx_uld_type]; - txq = &txq_info->uldtxq[idx]; - if (unlikely(is_ctrl_pkt(skb))) { /* Single ctrl queue is a requirement for LE workaround path */ if (adap->tids.nsftids) idx = 0; return ctrl_xmit(&adap->sge.ctrlq[idx], skb); } + + txq_info = adap->sge.uld_txq_info[tx_uld_type]; + if (unlikely(!txq_info)) { + WARN_ON(true); + return NET_XMIT_DROP; + } + + txq = &txq_info->uldtxq[idx]; return ofld_xmit(txq, skb); } @@ -2038,16 +2041,22 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct sge *s = &q->adap->sge; int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 
CPL_TRACE_PKT : CPL_TRACE_PKT_T5; + u16 err_vec; struct port_info *pi; if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) return handle_trace_pkt(q->adap, si); pkt = (const struct cpl_rx_pkt *)rsp; - csum_ok = pkt->csum_calc && !pkt->err_vec && + /* Compressed error vector is enabled for T6 only */ + if (q->adap->params.tp.rx_pkt_encap) + err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); + else + err_vec = be16_to_cpu(pkt->err_vec); + + csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); if ((pkt->l2info & htonl(RXF_TCP_F)) && - !(cxgb_poll_busy_polling(q)) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { do_gro(rxq, si, pkt); return 0; @@ -2092,7 +2101,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { - if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F))) + if (q->adap->params.tp.rx_pkt_encap) + csum_ok = err_vec & + T6_COMPR_RXERR_SUM_F; + else + csum_ok = err_vec & RXERR_CSUM_F; + if (!csum_ok) skb->ip_summed = CHECKSUM_UNNECESSARY; } } @@ -2273,38 +2287,6 @@ static int process_responses(struct sge_rspq *q, int budget) return budget - budget_left; } -#ifdef CONFIG_NET_RX_BUSY_POLL -int cxgb_busy_poll(struct napi_struct *napi) -{ - struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); - unsigned int params, work_done; - u32 val; - - if (!cxgb_poll_lock_poll(q)) - return LL_FLUSH_BUSY; - - work_done = process_responses(q, 4); - params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1); - q->next_intr_params = params; - val = CIDXINC_V(work_done) | SEINTARM_V(params); - - /* If we don't have access to the new User GTS (T5+), use the old - * doorbell mechanism; otherwise use the new BAR2 mechanism. - */ - if (unlikely(!q->bar2_addr)) - t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), - val | INGRESSQID_V((u32)q->cntxt_id)); - else { - writel(val | INGRESSQID_V(q->bar2_qid), - q->bar2_addr + SGE_UDB_GTS); - wmb(); - } - - cxgb_poll_unlock_poll(q); - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * napi_rx_handler - the NAPI handler for Rx processing * @napi: the napi instance @@ -2323,9 +2305,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) int work_done; u32 val; - if (!cxgb_poll_lock_napi(q)) - return budget; - work_done = process_responses(q, budget); if (likely(work_done < budget)) { int timer_index; @@ -2365,7 +2344,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) q->bar2_addr + SGE_UDB_GTS); wmb(); } - cxgb_poll_unlock_napi(q); return work_done; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e8139514d32c..87000cd39737 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -284,6 +284,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 }; + struct mbox_list entry; u16 access = 0; u16 execute = 0; u32 v; @@ -311,11 +312,62 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, timeout = -timeout; } + /* Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... 
+ */ + spin_lock(&adap->mbox_lock); + list_add_tail(&entry.list, &adap->mlist.list); + spin_unlock(&adap->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rearely + * contend on access to the mailbox ... + */ + pcie_fw = t4_read_reg(adap, PCIE_FW_A); + if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) { + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); + ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY; + t4_record_mbox(adap, cmd, size, access, ret); + return ret; + } + + /* If we're at the head, break out and start the mailbox + * protocol. + */ + if (list_first_entry(&adap->mlist.list, struct mbox_list, + list) == &entry) + break; + + /* Delay for a bit before checking again ... */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + mdelay(ms); + } + } + + /* Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); - if (v != MBOX_OWNER_DRV) { + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); return ret; @@ -366,6 +418,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, execute = i + ms; t4_record_mbox(adap, cmd_rpl, MBOX_LEN, access, execute); + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); return -FW_CMD_RETVAL_G((int)res); } } @@ -375,6 +430,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); t4_report_fw_error(adap); + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); + t4_fatal_err(adap); return ret; } @@ -5382,22 +5441,28 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) const char *t4_get_port_type_description(enum fw_port_type port_type) { static const char *const port_type_description[] = { - "R XFI", - "R XAUI", - "T SGMII", - "T XFI", - "T XAUI", + "Fiber_XFI", + "Fiber_XAUI", + "BT_SGMII", + "BT_XFI", + "BT_XAUI", "KX4", "CX4", "KX", "KR", - "R SFP+", - "KR/KX", - "KR/KX/KX4", - "R QSFP_10G", - "R QSA", - "R QSFP", - "R BP40_BA", + "SFP", + "BP_AP", + "BP4_AP", + "QSFP_10G", + "QSA", + "QSFP", + "BP40_BA", + "KR4_100G", + "CR4_QSFP", + "CR_QSFP", + "CR2_QSFP", + "SFP28", + "KR_SFP28", }; if (port_type < ARRAY_SIZE(port_type_description)) @@ -5438,6 +5503,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx, void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) { u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A); #define GET_STAT(name) \ t4_read_reg64(adap, \ @@ -5469,6 +5535,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATTX_F) { + p->tx_frames -= p->tx_pause; + p->tx_octets -= p->tx_pause * 64; + } + if 
(stat_ctl & COUNTPAUSEMCTX_F) + p->tx_mcast_frames -= p->tx_pause; + } p->rx_octets = GET_STAT(RX_PORT_BYTES); p->rx_frames = GET_STAT(RX_PORT_FRAMES); p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); @@ -5497,6 +5571,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATRX_F) { + p->rx_frames -= p->rx_pause; + p->rx_octets -= p->rx_pause * 64; + } + if (stat_ctl & COUNTPAUSEMCRX_F) + p->rx_mcast_frames -= p->rx_pause; + } + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; @@ -7477,6 +7560,39 @@ int t4_prep_adapter(struct adapter *adapter) } /** + * t4_shutdown_adapter - shut down adapter, host & wire + * @adapter: the adapter + * + * Perform an emergency shutdown of the adapter and stop it from + * continuing any further communication on the ports or DMA to the + * host. This is typically used when the adapter and/or firmware + * have crashed and we want to prevent any further accidental + * communication with the rest of the world. This will also force + * the port Link Status to go down -- if register writes work -- + * which should help our peers figure out that we're down. + */ +int t4_shutdown_adapter(struct adapter *adapter) +{ + int port; + + t4_intr_disable(adapter); + t4_write_reg(adapter, DBG_GPIO_EN_A, 0); + for_each_port(adapter, port) { + u32 a_port_cfg = PORT_REG(port, + is_t4(adapter->params.chip) + ? XGMAC_PORT_CFG_A + : MAC_PORT_CFG_A); + + t4_write_reg(adapter, a_port_cfg, + t4_read_reg(adapter, a_port_cfg) + & ~SIGNAL_DET_V(1)); + } + t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0); + + return 0; +} + +/** * t4_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID @@ -7686,6 +7802,13 @@ int t4_init_tp_params(struct adapter *adap) &adap->params.tp.ingress_config, 1, TP_INGRESS_CONFIG_A); } + /* For T6, cache the adapter's compressed error vector + * and passing outer header info for encapsulated packets. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + v = t4_read_reg(adap, TP_OUT_CONFIG_A); + adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 
1 : 0; + } /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field * shift positions of several elements of the Compressed Filter Tuple diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index a267173f5997..8098c93cd16e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1175,6 +1175,21 @@ struct cpl_rx_pkt { #define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S) #define RXERR_CSUM_F RXERR_CSUM_V(1U) +#define T6_COMPR_RXERR_LEN_S 1 +#define T6_COMPR_RXERR_LEN_V(x) ((x) << T6_COMPR_RXERR_LEN_S) +#define T6_COMPR_RXERR_LEN_F T6_COMPR_RXERR_LEN_V(1U) + +#define T6_COMPR_RXERR_VEC_S 0 +#define T6_COMPR_RXERR_VEC_M 0x3F +#define T6_COMPR_RXERR_VEC_V(x) ((x) << T6_COMPR_RXERR_LEN_S) +#define T6_COMPR_RXERR_VEC_G(x) \ + (((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M) + +/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */ +#define T6_COMPR_RXERR_SUM_S 4 +#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S) +#define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U) + struct cpl_trace_pkt { u8 opcode; u8 intf; @@ -1349,6 +1364,10 @@ struct cpl_tx_data { #define TX_FORCE_S 13 #define TX_FORCE_V(x) ((x) << TX_FORCE_S) +#define T6_TX_FORCE_S 20 +#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S) +#define T6_TX_FORCE_F T6_TX_FORCE_V(1U) + enum { ULP_TX_MEM_READ = 2, ULP_TX_MEM_WRITE = 3, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index ecf3ccc257bc..a323185507ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -169,6 +169,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/ CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/ + CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ /* T6 adapters: */ @@ -185,6 +188,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6011), CH_PCI_ID_TABLE_FENTRY(0x6014), CH_PCI_ID_TABLE_FENTRY(0x6015), + CH_PCI_ID_TABLE_FENTRY(0x6080), + CH_PCI_ID_TABLE_FENTRY(0x6081), CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 9fea255c7e87..3348d33c36fa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -855,6 +855,14 @@ #define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S) #define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U) +#define DBG_GPIO_EN_A 0x6010 +#define XGMAC_PORT_CFG_A 0x1000 +#define MAC_PORT_CFG_A 0x800 + +#define SIGNAL_DET_S 14 +#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S) +#define SIGNAL_DET_F SIGNAL_DET_V(1U) + #define MC_ECC_STATUS_A 0x751c #define MC_P_ECC_STATUS_A 0x4131c @@ -1276,6 +1284,10 @@ #define DBGLARPTR_M 0x7fU #define DBGLARPTR_V(x) ((x) << DBGLARPTR_S) +#define CRXPKTENC_S 3 +#define CRXPKTENC_V(x) ((x) << CRXPKTENC_S) +#define CRXPKTENC_F CRXPKTENC_V(1U) + #define TP_DBG_LA_DATAL_A 0x7ed8 #define TP_DBG_LA_CONFIG_A 0x7ed4 #define TP_OUT_CONFIG_A 0x7d04 @@ -1794,12 +1806,29 @@ #define MPS_CMN_CTL_A 0x9000 +#define COUNTPAUSEMCRX_S 5 +#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S) +#define COUNTPAUSEMCRX_F COUNTPAUSEMCRX_V(1U) + +#define COUNTPAUSESTATRX_S 4 +#define 
COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S) +#define COUNTPAUSESTATRX_F COUNTPAUSESTATRX_V(1U) + +#define COUNTPAUSEMCTX_S 3 +#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S) +#define COUNTPAUSEMCTX_F COUNTPAUSEMCTX_V(1U) + +#define COUNTPAUSESTATTX_S 2 +#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S) +#define COUNTPAUSESTATTX_F COUNTPAUSESTATTX_V(1U) + #define NUMPORTS_S 0 #define NUMPORTS_M 0x3U #define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M) #define MPS_INT_CAUSE_A 0x9008 #define MPS_TX_INT_CAUSE_A 0x9408 +#define MPS_STAT_CTL_A 0x9600 #define FRMERR_S 15 #define FRMERR_V(x) ((x) << FRMERR_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 8d9e4b7a8e84..ccc05f874419 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -3385,6 +3385,14 @@ struct fw_crypto_lookaside_wr { #define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \ (((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M) +#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_S 15 +#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_FQIDX_S) +#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_FQIDX_S) & \ + FW_CRYPTO_LOOKASIDE_WR_FQIDX_M) + #define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10 #define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3 #define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 2accab386323..fa376444e57c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0F -#define T4FW_VERSION_MICRO 0x25 +#define T4FW_VERSION_MINOR 0x10 +#define T4FW_VERSION_MICRO 0x21 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0F -#define T5FW_VERSION_MICRO 0x25 +#define T5FW_VERSION_MINOR 0x10 +#define T5FW_VERSION_MICRO 0x21 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x0F -#define T6FW_VERSION_MICRO 0x25 +#define T6FW_VERSION_MINOR 0x10 +#define T6FW_VERSION_MICRO 0x21 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0d1a134c8174..ac7a150c54e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -158,20 +158,23 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) netif_carrier_on(dev); switch (pi->link_cfg.speed) { - case 40000: - s = "40Gbps"; + case 100: + s = "100Mbps"; + break; + case 1000: + s = "1Gbps"; break; - case 10000: s = "10Gbps"; break; - - case 1000: - s = "1000Mbps"; + case 25000: + s = "25Gbps"; break; - - case 100: - s = "100Mbps"; + case 40000: + s = "40Gbps"; + break; + case 100000: + s = "100Gbps"; break; default: diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f3ed9ce99e5e..e37dde2ba97f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) u32 val; if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); intr_params = rspq->next_intr_params; rspq->next_intr_params = rspq->intr_params; } else diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h index e995a1a3840a..a91ad766cef0 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h @@ -59,7 +59,7 @@ struct cxgbi_pagepod_hdr { #define PPOD_PAGES_MAX 4 struct cxgbi_pagepod { struct cxgbi_pagepod_hdr hdr; - u64 addr[PPOD_PAGES_MAX + 1]; + __be64 addr[PPOD_PAGES_MAX + 1]; }; /* ddp tag format diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 396c88678eab..7a7c02f1f8b9 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d pr_info("mdio write timed out\n"); } -static int ep93xx_rx(struct net_device *dev, int processed, int budget) +static int ep93xx_rx(struct net_device *dev, int budget) { struct ep93xx_priv *ep = netdev_priv(dev); + int processed = 0; while (processed < budget) { int entry; @@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget) skb_put(skb, length); skb->protocol = eth_type_trans(skb, dev); - netif_receive_skb(skb); + napi_gro_receive(&ep->napi, skb); dev->stats.rx_packets++; dev->stats.rx_bytes += length; @@ -310,35 +311,17 @@ err: return processed; } -static int ep93xx_have_more_rx(struct ep93xx_priv *ep) -{ - struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; - return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); -} - static int ep93xx_poll(struct napi_struct *napi, int budget) { struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); struct net_device *dev = ep->dev; - int rx = 0; - -poll_some_more: - rx = ep93xx_rx(dev, rx, budget); - if (rx < budget) { - int more = 0; + int rx; + rx = ep93xx_rx(dev, budget); + if (rx < budget && napi_complete_done(napi, rx)) { spin_lock_irq(&ep->rx_lock); - __napi_complete(napi); wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); - if (ep93xx_have_more_rx(ep)) { - wrl(ep, REG_INTEN, REG_INTEN_TX); - wrl(ep, REG_INTSTSP, REG_INTSTS_RX); - more = 1; - } spin_unlock_irq(&ep->rx_lock); - - if (more && napi_reschedule(napi)) - goto poll_some_more; } if (rx) { diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 9023c858715d..2b23f46b34d3 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -135,6 +135,11 @@ struct enic_rfs_flw_tbl { struct timer_list rfs_may_expire; }; +struct vxlan_offload { + u16 vxlan_udp_port_number; + u8 patch_level; +}; + /* Per-instance private data structure */ struct enic { struct net_device *netdev; @@ -175,6 +180,7 @@ struct enic { /* receive queue cache line section */ ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; unsigned int rq_count; + struct vxlan_offload vxlan; u64 rq_truncated_pkts; u64 rq_bad_fcs; struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX]; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index cdd7a1a59aa7..4b87beeabce1 100644 --- 
a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -43,10 +43,9 @@ #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -#include <net/busy_poll.h> -#endif #include <linux/crash_dump.h> +#include <net/busy_poll.h> +#include <net/vxlan.h> #include "cq_enet_desc.h" #include "vnic_dev.h" @@ -178,6 +177,134 @@ static void enic_unset_affinity_hint(struct enic *enic) irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); } +static void enic_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct enic *enic = netdev_priv(netdev); + __be16 port = ti->port; + int err; + + spin_lock_bh(&enic->devcmd_lock); + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) { + netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported"); + goto error; + } + + if (ti->sa_family != AF_INET) { + netdev_info(netdev, "vxlan: only IPv4 offload supported"); + goto error; + } + + if (enic->vxlan.vxlan_udp_port_number) { + if (ntohs(port) == enic->vxlan.vxlan_udp_port_number) + netdev_warn(netdev, "vxlan: udp port already offloaded"); + else + netdev_info(netdev, "vxlan: offload supported for only one UDP port"); + + goto error; + } + + err = vnic_dev_overlay_offload_cfg(enic->vdev, + OVERLAY_CFG_VXLAN_PORT_UPDATE, + ntohs(port)); + if (err) + goto error; + + err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, + enic->vxlan.patch_level); + if (err) + goto error; + + enic->vxlan.vxlan_udp_port_number = ntohs(port); + + netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ", + (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family); + + goto unlock; + +error: + netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d", + ntohs(port), ti->sa_family, ti->type); +unlock: + spin_unlock_bh(&enic->devcmd_lock); +} + +static void enic_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct enic *enic = netdev_priv(netdev); + int err; + + spin_lock_bh(&enic->devcmd_lock); + + if ((ti->sa_family != AF_INET) || + ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number)) || + (ti->type != UDP_TUNNEL_TYPE_VXLAN)) { + netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded", + ntohs(ti->port), ti->sa_family, ti->type); + goto unlock; + } + + err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, + OVERLAY_OFFLOAD_DISABLE); + if (err) { + netdev_err(netdev, "vxlan: del offload udp port: %d failed", + ntohs(ti->port)); + goto unlock; + } + + enic->vxlan.vxlan_udp_port_number = 0; + + netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n", + ntohs(ti->port), ti->sa_family); + +unlock: + spin_unlock_bh(&enic->devcmd_lock); +} + +static netdev_features_t enic_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb); + struct enic *enic = netdev_priv(dev); + struct udphdr *udph; + u16 port = 0; + u16 proto; + + if (!skb->encapsulation) + return features; + + features = vxlan_features_check(skb, features); + + /* hardware only supports IPv4 vxlan tunnel */ + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + goto out; + + /* hardware does not support offload of ipv6 inner pkt */ + if (eth->h_proto != ntohs(ETH_P_IP)) + goto out; + + proto = ip_hdr(skb)->protocol; + + if (proto == IPPROTO_UDP) { + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); + } + + /* HW supports offload 
of only one UDP port. Remove CSUM and GSO MASK + * for other UDP port tunnels + */ + if (port != enic->vxlan.vxlan_udp_port_number) + goto out; + + return features; + +out: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + int enic_is_dynamic(struct enic *enic) { return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; @@ -506,20 +633,19 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, return err; } -static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, - struct sk_buff *skb, unsigned int mss, - int vlan_tag_insert, unsigned int vlan_tag, - int loopback) +static void enic_preload_tcp_csum_encap(struct sk_buff *skb) { - unsigned int frag_len_left = skb_headlen(skb); - unsigned int len_left = skb->len - frag_len_left; - unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int eop = (len_left == 0); - unsigned int len; - dma_addr_t dma_addr; - unsigned int offset = 0; - skb_frag_t *frag; + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + inner_ip_hdr(skb)->check = 0; + inner_tcp_hdr(skb)->check = + ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, 0, + IPPROTO_TCP, 0); + } +} +static void enic_preload_tcp_csum(struct sk_buff *skb) +{ /* Preload TCP csum field with IP pseudo hdr calculated * with IP length set to zero. HW will later add in length * to each TCP segment resulting from the TSO. @@ -533,6 +659,30 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } +} + +static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, unsigned int mss, + int vlan_tag_insert, unsigned int vlan_tag, + int loopback) +{ + unsigned int frag_len_left = skb_headlen(skb); + unsigned int len_left = skb->len - frag_len_left; + int eop = (len_left == 0); + unsigned int offset = 0; + unsigned int hdr_len; + dma_addr_t dma_addr; + unsigned int len; + skb_frag_t *frag; + + if (skb->encapsulation) { + hdr_len = skb_inner_transport_header(skb) - skb->data; + hdr_len += inner_tcp_hdrlen(skb); + enic_preload_tcp_csum_encap(skb); + } else { + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + enic_preload_tcp_csum(skb); + } /* Queue WQ_ENET_MAX_DESC_LEN length descriptors * for the main skb fragment @@ -581,6 +731,38 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, return 0; } +static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, + int vlan_tag_insert, + unsigned int vlan_tag, int loopback) +{ + unsigned int head_len = skb_headlen(skb); + unsigned int len_left = skb->len - head_len; + /* Hardware will overwrite the checksum fields, calculating from + * scratch and ignoring the value placed by software. 
+ * Offload mode = 00 + * mss[2], mss[1], mss[0] bits are set + */ + unsigned int mss_or_csum = 7; + int eop = (len_left == 0); + dma_addr_t dma_addr; + int err = 0; + + dma_addr = pci_map_single(enic->pdev, skb->data, head_len, + PCI_DMA_TODEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) + return -ENOMEM; + + enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0, + vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop, + loopback); + if (!eop) + err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + + return err; +} + static inline void enic_queue_wq_skb(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb) { @@ -603,6 +785,9 @@ static inline void enic_queue_wq_skb(struct enic *enic, err = enic_queue_wq_skb_tso(enic, wq, skb, mss, vlan_tag_insert, vlan_tag, loopback); + else if (skb->encapsulation) + err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert, + vlan_tag, loopback); else if (skb->ip_summed == CHECKSUM_PARTIAL) err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); @@ -680,8 +865,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, } /* dev_base_lock rwlock held, nominally process context */ -static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *net_stats) +static void enic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; @@ -693,7 +878,7 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, * recorded stats. */ if (err == -ENOMEM) - return net_stats; + return; net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; @@ -707,8 +892,6 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, net_stats->rx_over_errors = enic->rq_truncated_pkts; net_stats->rx_crc_errors = enic->rq_bad_fcs; net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; - - return net_stats; } static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr) @@ -1117,6 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, u8 packet_error; u16 q_number, completed_index, bytes_written, vlan_tci, checksum; u32 rss_hash; + bool outer_csum_ok = true, encap = false; if (skipped) return; @@ -1165,7 +1349,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, skb_put(skb, bytes_written); skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, q_number); - if (netdev->features & NETIF_F_RXHASH) { + if ((netdev->features & NETIF_F_RXHASH) && rss_hash && + (type == 3)) { switch (rss_type) { case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4: case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: @@ -1179,22 +1364,45 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, break; } } + if (enic->vxlan.vxlan_udp_port_number) { + switch (enic->vxlan.patch_level) { + case 0: + if (fcoe) { + encap = true; + outer_csum_ok = fcoe_fc_crc_ok; + } + break; + case 2: + if ((type == 7) && + (rss_hash & BIT(0))) { + encap = true; + outer_csum_ok = (rss_hash & BIT(1)) && + (rss_hash & BIT(2)); + } + break; + } + } /* Hardware does not provide whole packet checksum. It only * provides pseudo checksum. Since hw validates the packet * checksum but not provide us the checksum value. use * CHECSUM_UNNECESSARY. + * + * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is + * inner csum_ok. outer_csum_ok is set by hw when outer udp + * csum is correct or is zero. 
*/ - if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok && - ipv4_csum_ok) + if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && + tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = encap; + } if (vlan_stripped) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); skb_mark_napi_id(skb, &enic->napi[rq->index]); - if (enic_poll_busy_polling(rq) || - !(netdev->features & NETIF_F_GRO)) + if (!(netdev->features & NETIF_F_GRO)) netif_receive_skb(skb); else napi_gro_receive(&enic->napi[q_number], skb); @@ -1298,15 +1506,6 @@ static int enic_poll(struct napi_struct *napi, int budget) wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, enic_wq_service, NULL); - if (!enic_poll_lock_napi(&enic->rq[cq_rq])) { - if (wq_work_done > 0) - vnic_intr_return_credits(&enic->intr[intr], - wq_work_done, - 0 /* dont unmask intr */, - 0 /* dont reset intr timer */); - return budget; - } - if (budget > 0) rq_work_done = vnic_cq_service(&enic->cq[cq_rq], rq_work_to_do, enic_rq_service, NULL); @@ -1325,7 +1524,6 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); - enic_poll_unlock_napi(&enic->rq[cq_rq], napi); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. @@ -1345,7 +1543,7 @@ static int enic_poll(struct napi_struct *napi, int budget) * exit polling */ - napi_complete(napi); + napi_complete_done(napi, rq_work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); @@ -1392,34 +1590,6 @@ static void enic_set_rx_cpu_rmap(struct enic *enic) #endif /* CONFIG_RFS_ACCEL */ -#ifdef CONFIG_NET_RX_BUSY_POLL -static int enic_busy_poll(struct napi_struct *napi) -{ - struct net_device *netdev = napi->dev; - struct enic *enic = netdev_priv(netdev); - unsigned int rq = (napi - &enic->napi[0]); - unsigned int cq = enic_cq_rq(enic, rq); - unsigned int intr = enic_msix_rq_intr(enic, rq); - unsigned int work_to_do = -1; /* clean all pkts possible */ - unsigned int work_done; - - if (!enic_poll_lock_poll(&enic->rq[rq])) - return LL_FLUSH_BUSY; - work_done = vnic_cq_service(&enic->cq[cq], work_to_do, - enic_rq_service, NULL); - - if (work_done > 0) - vnic_intr_return_credits(&enic->intr[intr], - work_done, 0, 0); - vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); - if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_poll(&enic->rq[rq]); - - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - static int enic_poll_msix_wq(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; @@ -1461,8 +1631,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) unsigned int work_done = 0; int err; - if (!enic_poll_lock_napi(&enic->rq[rq])) - return budget; /* Service RQ */ @@ -1495,14 +1663,13 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_napi(&enic->rq[rq], napi); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, * exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); @@ -1753,10 +1920,9 @@ static int enic_open(struct net_device 
*netdev) netif_tx_wake_all_queues(netdev); - for (i = 0; i < enic->rq_count; i++) { - enic_busy_poll_init_lock(&enic->rq[i]); + for (i = 0; i < enic->rq_count; i++) napi_enable(&enic->napi[i]); - } + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) for (i = 0; i < enic->wq_count; i++) napi_enable(&enic->napi[enic_cq_wq(enic, i)]); @@ -1800,13 +1966,8 @@ static int enic_stop(struct net_device *netdev) enic_dev_disable(enic); - for (i = 0; i < enic->rq_count; i++) { + for (i = 0; i < enic->rq_count; i++) napi_disable(&enic->napi[i]); - local_bh_disable(); - while (!enic_poll_lock_napi(&enic->rq[i])) - mdelay(1); - local_bh_enable(); - } netif_carrier_off(netdev); netif_tx_disable(netdev); @@ -2337,9 +2498,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = enic_busy_poll, -#endif + .ndo_udp_tunnel_add = enic_udp_tunnel_add, + .ndo_udp_tunnel_del = enic_udp_tunnel_del, + .ndo_features_check = enic_features_check, }; static const struct net_device_ops enic_netdev_ops = { @@ -2363,9 +2524,9 @@ static const struct net_device_ops enic_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = enic_busy_poll, -#endif + .ndo_udp_tunnel_add = enic_udp_tunnel_add, + .ndo_udp_tunnel_del = enic_udp_tunnel_del, + .ndo_features_check = enic_features_check, }; static void enic_dev_deinit(struct enic *enic) @@ -2741,6 +2902,39 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_RXHASH; if (ENIC_SETTING(enic, RXCSUM)) netdev->hw_features |= NETIF_F_RXCSUM; + if (ENIC_SETTING(enic, VXLAN)) { + u64 patch_level; + + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_HW_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_features |= netdev->hw_enc_features; + /* get bit mask from hw about supported offload bit level + * BIT(0) = fw supports patch_level 0 + * fcoe bit = encap + * fcoe_fc_crc_ok = outer csum ok + * BIT(1) = always set by fw + * BIT(2) = fw supports patch_level 2 + * BIT(0) in rss_hash = encap + * BIT(1,2) in rss_hash = outer_ip_csum_ok/ + * outer_tcp_csum_ok + * used in enic_rq_indicate_buf + */ + err = vnic_dev_get_supported_feature_ver(enic->vdev, + VIC_FEATURE_VXLAN, + &patch_level); + if (err) + patch_level = 0; + /* mask bits that are supported by driver + */ + patch_level &= BIT_ULL(0) | BIT_ULL(2); + patch_level = fls(patch_level); + patch_level = patch_level ? 
patch_level - 1 : 0; + enic->vxlan.patch_level = patch_level; + } netdev->features |= netdev->hw_features; netdev->vlan_features |= netdev->features; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 8f27df3207bc..1841ad45d215 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -1247,3 +1247,37 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, return ret; } + +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config) +{ + u64 a0 = overlay; + u64 a1 = config; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait); +} + +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, + u16 vxlan_udp_port_number) +{ + u64 a1 = vxlan_udp_port_number; + u64 a0 = overlay; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait); +} + +int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, + u64 *supported_versions) +{ + u64 a0 = feature; + int wait = 1000; + u64 a1 = 0; + int ret; + + ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); + if (!ret) + *supported_versions = a0; + + return ret; +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 54156c484424..9d43d6bb9907 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -179,5 +179,10 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter *data); int vnic_devcmd_init(struct vnic_dev *vdev); +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config); +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, + u16 vxlan_udp_port_number); +int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, + u64 *supported_versions); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index 2a812880b884..d83880b0d468 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -406,6 +406,31 @@ enum vnic_devcmd_cmd { * in: (u32) a0=Queue Pair number */ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63), + + /* Use this devcmd for agreeing on the highest common version supported + * by both driver and fw for features who need such a facility. 
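CMD_GET_SUPP_FEATURE_VER hands back a bitmap of the versions the firmware implements for the requested feature; the driver masks that with the versions it understands and takes the highest common bit, which is how enic_probe() above derives vxlan.patch_level for VIC_FEATURE_VXLAN. A sketch of that selection, assuming fls64() from <linux/bitops.h> (not the driver's exact code):

    /* Pick the highest version supported by both firmware and driver
     * from the CMD_GET_SUPP_FEATURE_VER bitmap.
     */
    static u8 pick_feature_version(u64 fw_versions, u64 drv_versions)
    {
            u64 common = fw_versions & drv_versions;

            /* fls64() is 1-based and returns 0 when no bit is set */
            return common ? fls64(common) - 1 : 0;
    }
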
+ * in: (u64) a0 = feature (driver requests for the supported versions + * on this feature) + * out: (u64) a0 = bitmap of all supported versions for that feature + */ + CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69), + + /* Control (Enable/Disable) overlay offloads on the given vnic + * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE + * a0 = OVERLAY_FEATURE_VXLAN : VxLAN + * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or + * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or + * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2 + */ + CMD_OVERLAY_OFFLOAD_CTRL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72), + + /* Configuration of overlay offloads feature on a given vNIC + * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE + * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN + * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN + * in: (u16) a2 = unsigned short int port information + */ + CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73), }; /* CMD_ENABLE2 flags */ @@ -657,4 +682,30 @@ struct devcmd2_result { #define DEVCMD2_RING_SIZE 32 #define DEVCMD2_DESC_SIZE 128 +enum overlay_feature_t { + OVERLAY_FEATURE_NVGRE = 1, + OVERLAY_FEATURE_VXLAN, + OVERLAY_FEATURE_MAX, +}; + +enum overlay_ofld_cmd { + OVERLAY_OFFLOAD_ENABLE, + OVERLAY_OFFLOAD_DISABLE, + OVERLAY_OFFLOAD_ENABLE_P2, + OVERLAY_OFFLOAD_MAX, +}; + +#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0 + +/* Use this enum to get the supported versions for each of these features + * If you need to use the devcmd_get_supported_feature_version(), add + * the new feature into this enum and install function handler in devcmd.c + */ +enum vic_feature_t { + VIC_FEATURE_VXLAN, + VIC_FEATURE_RDMA, + VIC_FEATURE_VXLAN_PATCH, + VIC_FEATURE_MAX, +}; + #endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h index 75aced2de869..7d6fbb5635a4 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_enet.h +++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h @@ -48,6 +48,7 @@ struct vnic_enet_config { #define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ #define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. 
fields */ #define VENETF_LOOP 0x800 /* Loopback enabled */ +#define VENETF_VXLAN 0x10000 /* VxLAN offload */ #define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ #define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index b9c82f143d7e..0413103ebe94 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -92,9 +92,6 @@ struct vnic_rq { struct vnic_rq_buf *to_clean; void *os_buf_head; unsigned int pkts_outstanding; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t bpoll_state; -#endif /* CONFIG_NET_RX_BUSY_POLL */ }; static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) @@ -207,81 +204,6 @@ static inline int vnic_rq_fill(struct vnic_rq *rq, return 0; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) -{ - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_lock_napi(struct vnic_rq *rq) -{ - int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, - ENIC_POLL_STATE_NAPI); - - return (rc == ENIC_POLL_STATE_IDLE); -} - -static inline void enic_poll_unlock_napi(struct vnic_rq *rq, - struct napi_struct *napi) -{ - WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI); - napi_gro_flush(napi, false); - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_lock_poll(struct vnic_rq *rq) -{ - int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, - ENIC_POLL_STATE_POLL); - - return (rc == ENIC_POLL_STATE_IDLE); -} - - -static inline void enic_poll_unlock_poll(struct vnic_rq *rq) -{ - WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL); - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_busy_polling(struct vnic_rq *rq) -{ - return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL; -} - -#else - -static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) -{ -} - -static inline bool enic_poll_lock_napi(struct vnic_rq *rq) -{ - return true; -} - -static inline bool enic_poll_unlock_napi(struct vnic_rq *rq, - struct napi_struct *napi) -{ - return false; -} - -static inline bool enic_poll_lock_poll(struct vnic_rq *rq) -{ - return false; -} - -static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) -{ - return false; -} - -static inline bool enic_poll_ll_polling(struct vnic_rq *rq) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - void vnic_rq_free(struct vnic_rq *rq); int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size); diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 57c17e797ae3..127ce9707378 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -1485,95 +1485,104 @@ static void __de_get_regs(struct de_private *de, u8 *buf) de_rx_missed(de, rbuf[8]); } -static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) +static int __de_get_link_ksettings(struct de_private *de, + struct ethtool_link_ksettings *cmd) { - ecmd->supported = de->media_supported; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->phy_address = 0; - ecmd->advertising = de->media_advertise; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + de->media_supported); + cmd->base.phy_address = 0; + 
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + de->media_advertise); switch (de->media_type) { case DE_MEDIA_AUI: - ecmd->port = PORT_AUI; + cmd->base.port = PORT_AUI; break; case DE_MEDIA_BNC: - ecmd->port = PORT_BNC; + cmd->base.port = PORT_BNC; break; default: - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; break; } - ethtool_cmd_speed_set(ecmd, 10); + cmd->base.speed = 10; if (dr32(MacMode) & FullDuplex) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; if (de->media_lock) - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; else - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; /* ignore maxtxpkt, maxrxpkt for now */ return 0; } -static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) +static int __de_set_link_ksettings(struct de_private *de, + const struct ethtool_link_ksettings *cmd) { u32 new_media; unsigned int media_lock; + u8 duplex = cmd->base.duplex; + u8 port = cmd->base.port; + u8 autoneg = cmd->base.autoneg; + u32 advertising; - if (ethtool_cmd_speed(ecmd) != 10) - return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.speed != 10) return -EINVAL; - if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC) + if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) return -EINVAL; - if (de->de21040 && ecmd->port == PORT_BNC) + if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC) return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL) + if (de->de21040 && port == PORT_BNC) return -EINVAL; - if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE) return -EINVAL; - if (ecmd->advertising & ~de->media_supported) + if (advertising & ~de->media_supported) return -EINVAL; - if (ecmd->autoneg == AUTONEG_ENABLE && - (!(ecmd->advertising & ADVERTISED_Autoneg))) + if (autoneg == AUTONEG_ENABLE && + (!(advertising & ADVERTISED_Autoneg))) return -EINVAL; - switch (ecmd->port) { + switch (port) { case PORT_AUI: new_media = DE_MEDIA_AUI; - if (!(ecmd->advertising & ADVERTISED_AUI)) + if (!(advertising & ADVERTISED_AUI)) return -EINVAL; break; case PORT_BNC: new_media = DE_MEDIA_BNC; - if (!(ecmd->advertising & ADVERTISED_BNC)) + if (!(advertising & ADVERTISED_BNC)) return -EINVAL; break; default: - if (ecmd->autoneg == AUTONEG_ENABLE) + if (autoneg == AUTONEG_ENABLE) new_media = DE_MEDIA_TP_AUTO; - else if (ecmd->duplex == DUPLEX_FULL) + else if (duplex == DUPLEX_FULL) new_media = DE_MEDIA_TP_FD; else new_media = DE_MEDIA_TP; - if (!(ecmd->advertising & ADVERTISED_TP)) + if (!(advertising & ADVERTISED_TP)) return -EINVAL; - if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half))) + if (!(advertising & (ADVERTISED_10baseT_Full | + ADVERTISED_10baseT_Half))) return -EINVAL; break; } - media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1; + media_lock = (autoneg == AUTONEG_ENABLE) ? 
0 : 1; if ((new_media == de->media_type) && (media_lock == de->media_lock) && - (ecmd->advertising == de->media_advertise)) + (advertising == de->media_advertise)) return 0; /* nothing to change */ de_link_down(de); @@ -1582,7 +1591,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) de->media_type = new_media; de->media_lock = media_lock; - de->media_advertise = ecmd->advertising; + de->media_advertise = advertising; de_set_media(de); if (netif_running(de->dev)) de_start_rxtx(de); @@ -1604,25 +1613,27 @@ static int de_get_regs_len(struct net_device *dev) return DE_REGS_SIZE; } -static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int de_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); - rc = __de_get_settings(de, ecmd); + rc = __de_get_link_ksettings(de, cmd); spin_unlock_irq(&de->lock); return rc; } -static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int de_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); - rc = __de_set_settings(de, ecmd); + rc = __de_set_link_ksettings(de, cmd); spin_unlock_irq(&de->lock); return rc; @@ -1690,13 +1701,13 @@ static const struct ethtool_ops de_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = de_get_drvinfo, .get_regs_len = de_get_regs_len, - .get_settings = de_get_settings, - .set_settings = de_set_settings, .get_msglevel = de_get_msglevel, .set_msglevel = de_set_msglevel, .get_eeprom = de_get_eeprom, .nway_reset = de_nway_reset, .get_regs = de_get_regs, + .get_link_ksettings = de_get_link_ksettings, + .set_link_ksettings = de_set_link_ksettings, }; static void de21040_get_mac_address(struct de_private *de) diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c index 92306b320840..ba6ae24acf62 100644 --- a/drivers/net/ethernet/dec/tulip/interrupt.c +++ b/drivers/net/ethernet/dec/tulip/interrupt.c @@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, int budget) /* Remove us from polling list and enable RX intr. */ - napi_complete(napi); - iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); + napi_complete_done(napi, work_done); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: * 1. it can race with disabling irqs in irq handler @@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, int budget) * before we did napi_complete(). See? We would lose it. 
*/ /* remove ourselves from the polling list */ - napi_complete(napi); + napi_complete_done(napi, work_done); return work_done; } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index f82ebe5d89ee..8d98b259d1ba 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -926,48 +926,53 @@ static void uli526x_set_filter_mode(struct net_device * dev) } static void -ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) +ULi_ethtool_get_link_ksettings(struct uli526x_board_info *db, + struct ethtool_link_ksettings *cmd) { - ecmd->supported = (SUPPORTED_10baseT_Half | + u32 supported, advertising; + + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII); - ecmd->advertising = (ADVERTISED_10baseT_Half | + advertising = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_Autoneg | ADVERTISED_MII); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - ecmd->port = PORT_MII; - ecmd->phy_address = db->phy_addr; - - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.port = PORT_MII; + cmd->base.phy_address = db->phy_addr; - ethtool_cmd_speed_set(ecmd, SPEED_10); - ecmd->duplex = DUPLEX_HALF; + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) { - ethtool_cmd_speed_set(ecmd, SPEED_100); + cmd->base.speed = SPEED_100; } if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) { - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; } if(db->link_failed) { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } if (db->media_mode & ULI526X_AUTO) { - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; } } @@ -981,10 +986,12 @@ static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ struct uli526x_board_info *np = netdev_priv(dev); - ULi_ethtool_gset(np, cmd); + ULi_ethtool_get_link_ksettings(np, cmd); return 0; } @@ -1006,9 +1013,9 @@ static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, .get_link = netdev_get_link, .get_wol = uli526x_get_wol, + .get_link_ksettings = netdev_get_link_ksettings, }; /* diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index bc9bf88e5831..d1f2f3cc7cfa 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1391,25 +1391,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private 
*np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; @@ -1439,12 +1441,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 8c95a8a81e3c..1e350135f11d 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1256,52 +1256,63 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } -static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rio_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); + u32 supported, advertising; + if (np->phy_media) { /* fiber device */ - cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE; - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_INTERNAL; + supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; + advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; } else { /* copper device */ - cmd->supported = SUPPORTED_10baseT_Half | + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII; - cmd->advertising = ADVERTISED_10baseT_Half | + advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full| + ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_MII; - cmd->port = PORT_MII; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.port = PORT_MII; } - if ( np->link_status ) { - ethtool_cmd_speed_set(cmd, np->speed); - cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + if (np->link_status) { + cmd->base.speed = np->speed; + cmd->base.duplex = np->full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; } else { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - if ( np->an_enable) - cmd->autoneg = AUTONEG_ENABLE; + if (np->an_enable) + cmd->base.autoneg = AUTONEG_ENABLE; else - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; + + cmd->base.phy_address = np->phy_addr; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - cmd->phy_address = np->phy_addr; return 0; } -static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rio_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); + u32 speed = cmd->base.speed; + u8 duplex = cmd->base.duplex; + netif_carrier_off(dev); - if (cmd->autoneg == AUTONEG_ENABLE) { - if (np->an_enable) + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (np->an_enable) { return 0; - else { + } else { np->an_enable = 1; mii_set_media(dev); return 0; @@ -1309,18 +1320,18 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } else { np->an_enable = 0; if (np->speed == 1000) { - ethtool_cmd_speed_set(cmd, SPEED_100); - cmd->duplex = DUPLEX_FULL; + speed = SPEED_100; + duplex = DUPLEX_FULL; printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); } - switch (ethtool_cmd_speed(cmd)) { + switch (speed) { case SPEED_10: np->speed = 10; - np->full_duplex = (cmd->duplex == DUPLEX_FULL); + np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_100: np->speed = 100; - np->full_duplex = (cmd->duplex == DUPLEX_FULL); + np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_1000: /* not supported */ default: @@ -1339,9 +1350,9 @@ static u32 rio_get_link(struct net_device *dev) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = rio_get_drvinfo, - .get_settings = rio_get_settings, - .set_settings = rio_set_settings, .get_link = rio_get_link, + .get_link_ksettings = rio_get_link_ksettings, + .set_link_ksettings = rio_set_link_ksettings, }; static int diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 2e5b66762e15..2704bcf023be 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1664,21 +1664,23 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); - mii_ethtool_gset(&np->mii_if, ecmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return 0; } -static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); - res = mii_ethtool_sset(&np->mii_if, ecmd); + res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return res; } @@ -1800,8 +1802,6 @@ static int sundance_set_wol(struct net_device *dev, static const struct ethtool_ops 
ethtool_ops = { .begin = check_if_running, .get_drvinfo = get_drvinfo, - .get_settings = get_settings, - .set_settings = set_settings, .nway_reset = nway_reset, .get_link = get_link, .get_wol = sundance_get_wol, @@ -1811,6 +1811,8 @@ static const struct ethtool_ops ethtool_ops = { .get_strings = get_strings, .get_sset_count = get_sset_count, .get_ethtool_stats = get_ethtool_stats, + .get_link_ksettings = get_link_ksettings, + .set_link_ksettings = set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 2a17c59f69f9..3e77dd863175 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct *napi, int budget) /* We processed all packets available. Tell NAPI it can * stop polling then re-enable rx interrupts. */ - napi_complete(napi); + napi_complete_done(napi, npackets); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 7bf78a0d322c..278f139f2a22 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -457,7 +457,7 @@ static int ec_bhf_stop(struct net_device *net_dev) return 0; } -static struct rtnl_link_stats64 * +static void ec_bhf_get_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { @@ -472,8 +472,6 @@ ec_bhf_get_stats(struct net_device *net_dev, stats->tx_bytes = priv->stat_tx_bytes; stats->rx_bytes = priv->stat_rx_bytes; - - return stats; } static const struct net_device_ops ec_bhf_netdev_ops = { diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4c30c44b242e..d49528ad7821 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -226,11 +226,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ u64 tx_reqs_prev; /* Used to calculate TX pps */ }; -enum { - NAPI_POLLING, - BUSY_POLLING -}; - struct be_mcc_obj { struct be_queue_info q; struct be_queue_info cq; diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 0a48a31225e6..7d1819c9e8cc 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -606,7 +606,8 @@ bool be_pause_supported(struct be_adapter *adapter) false : true; } -static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +static int be_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct be_adapter *adapter = netdev_priv(netdev); u8 link_status; @@ -614,13 +615,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) int status; u32 auto_speeds; u32 fixed_speeds; + u32 supported = 0, advertising = 0; if (adapter->phy.link_speed < 0) { status = be_cmd_link_status_query(adapter, &link_speed, &link_status, 0); if (!status) be_link_status_update(adapter, link_status); - ethtool_cmd_speed_set(ecmd, link_speed); + cmd->base.speed = link_speed; status = be_cmd_get_phy_info(adapter); if (!status) { @@ -629,58 +631,51 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) be_cmd_query_cable_type(adapter); - ecmd->supported = + supported = convert_to_et_setting(adapter, auto_speeds | fixed_speeds); - ecmd->advertising = + advertising = 
convert_to_et_setting(adapter, auto_speeds); - ecmd->port = be_get_port_type(adapter); + cmd->base.port = be_get_port_type(adapter); if (adapter->phy.auto_speeds_supported) { - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; + advertising |= ADVERTISED_Autoneg; } - ecmd->supported |= SUPPORTED_Pause; + supported |= SUPPORTED_Pause; if (be_pause_supported(adapter)) - ecmd->advertising |= ADVERTISED_Pause; - - switch (adapter->phy.interface_type) { - case PHY_TYPE_KR_10GB: - case PHY_TYPE_KX4_10GB: - ecmd->transceiver = XCVR_INTERNAL; - break; - default: - ecmd->transceiver = XCVR_EXTERNAL; - break; - } + advertising |= ADVERTISED_Pause; } else { - ecmd->port = PORT_OTHER; - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_DUMMY1; + cmd->base.port = PORT_OTHER; + cmd->base.autoneg = AUTONEG_DISABLE; } /* Save for future use */ - adapter->phy.link_speed = ethtool_cmd_speed(ecmd); - adapter->phy.port_type = ecmd->port; - adapter->phy.transceiver = ecmd->transceiver; - adapter->phy.autoneg = ecmd->autoneg; - adapter->phy.advertising = ecmd->advertising; - adapter->phy.supported = ecmd->supported; + adapter->phy.link_speed = cmd->base.speed; + adapter->phy.port_type = cmd->base.port; + adapter->phy.autoneg = cmd->base.autoneg; + adapter->phy.advertising = advertising; + adapter->phy.supported = supported; } else { - ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed); - ecmd->port = adapter->phy.port_type; - ecmd->transceiver = adapter->phy.transceiver; - ecmd->autoneg = adapter->phy.autoneg; - ecmd->advertising = adapter->phy.advertising; - ecmd->supported = adapter->phy.supported; + cmd->base.speed = adapter->phy.link_speed; + cmd->base.port = adapter->phy.port_type; + cmd->base.autoneg = adapter->phy.autoneg; + advertising = adapter->phy.advertising; + supported = adapter->phy.supported; } - ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN; - ecmd->phy_address = adapter->port_num; + cmd->base.duplex = netif_carrier_ok(netdev) ? 
+ DUPLEX_FULL : DUPLEX_UNKNOWN; + cmd->base.phy_address = adapter->port_num; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } @@ -1399,7 +1394,6 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags) } const struct ethtool_ops be_ethtool_ops = { - .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, .get_wol = be_get_wol, .set_wol = be_set_wol, @@ -1433,5 +1427,6 @@ const struct ethtool_ops be_ethtool_ops = { .get_channels = be_get_channels, .set_channels = be_set_channels, .get_module_info = be_get_module_info, - .get_module_eeprom = be_get_module_eeprom + .get_module_eeprom = be_get_module_eeprom, + .get_link_ksettings = be_get_link_ksettings, }; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index cd49a54c538d..6be3b9aba8ed 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -647,8 +647,8 @@ void be_parse_stats(struct be_adapter *adapter) } } -static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void be_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct be_adapter *adapter = netdev_priv(netdev); struct be_drv_stats *drvs = &adapter->drv_stats; @@ -712,7 +712,6 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + drvs->rx_input_fifo_overflow_drop + drvs->rx_drops_no_pbuf; - return stats; } void be_link_status_update(struct be_adapter *adapter, u8 link_status) @@ -3064,7 +3063,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp) } static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, - int budget, int polling) + int budget) { struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; @@ -3096,8 +3095,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, goto loop_continue; } - /* Don't do gro when we're busy_polling */ - if (do_gro(rxcp) && polling != BUSY_POLLING) + if (do_gro(rxcp)) be_rx_compl_process_gro(rxo, napi, rxcp); else be_rx_compl_process(rxo, napi, rxcp); @@ -3195,106 +3193,6 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, } } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline bool be_lock_napi(struct be_eq_obj *eqo) -{ - bool status = true; - - spin_lock(&eqo->lock); /* BH is already disabled */ - if (eqo->state & BE_EQ_LOCKED) { - WARN_ON(eqo->state & BE_EQ_NAPI); - eqo->state |= BE_EQ_NAPI_YIELD; - status = false; - } else { - eqo->state = BE_EQ_NAPI; - } - spin_unlock(&eqo->lock); - return status; -} - -static inline void be_unlock_napi(struct be_eq_obj *eqo) -{ - spin_lock(&eqo->lock); /* BH is already disabled */ - - WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD)); - eqo->state = BE_EQ_IDLE; - - spin_unlock(&eqo->lock); -} - -static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) -{ - bool status = true; - - spin_lock_bh(&eqo->lock); - if (eqo->state & BE_EQ_LOCKED) { - eqo->state |= BE_EQ_POLL_YIELD; - status = false; - } else { - eqo->state |= BE_EQ_POLL; - } - spin_unlock_bh(&eqo->lock); - return status; -} - -static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) -{ - spin_lock_bh(&eqo->lock); - - WARN_ON(eqo->state & (BE_EQ_NAPI)); - eqo->state = BE_EQ_IDLE; - - spin_unlock_bh(&eqo->lock); -} - 
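The per-EQ locking state machine being deleted here was only needed while the driver exported its own ndo_busy_poll entry point; with busy polling now driven through the ordinary NAPI poll routine, NAPI scheduling already guarantees a single poller per queue, and napi_complete_done() reports the amount of work done so the core can make re-poll and GRO-flush decisions. A minimal poll handler shape under that model (illustrative; clean_rx_ring() and enable_rx_irq() are hypothetical helpers standing in for driver specifics):

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = clean_rx_ring(napi, budget);

            if (done < budget) {
                    napi_complete_done(napi, done);
                    enable_rx_irq(napi);
            }
            return done;
    }
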
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo) -{ - spin_lock_init(&eqo->lock); - eqo->state = BE_EQ_IDLE; -} - -static inline void be_disable_busy_poll(struct be_eq_obj *eqo) -{ - local_bh_disable(); - - /* It's enough to just acquire napi lock on the eqo to stop - * be_busy_poll() from processing any queueus. - */ - while (!be_lock_napi(eqo)) - mdelay(1); - - local_bh_enable(); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline bool be_lock_napi(struct be_eq_obj *eqo) -{ - return true; -} - -static inline void be_unlock_napi(struct be_eq_obj *eqo) -{ -} - -static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) -{ - return false; -} - -static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) -{ -} - -static inline void be_enable_busy_poll(struct be_eq_obj *eqo) -{ -} - -static inline void be_disable_busy_poll(struct be_eq_obj *eqo) -{ -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - int be_poll(struct napi_struct *napi, int budget) { struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); @@ -3309,25 +3207,20 @@ int be_poll(struct napi_struct *napi, int budget) for_all_tx_queues_on_eq(adapter, eqo, txo, i) be_process_tx(adapter, txo, i); - if (be_lock_napi(eqo)) { - /* This loop will iterate twice for EQ0 in which - * completions of the last RXQ (default one) are also processed - * For other EQs the loop iterates only once - */ - for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { - work = be_process_rx(rxo, napi, budget, NAPI_POLLING); - max_work = max(work, max_work); - } - be_unlock_napi(eqo); - } else { - max_work = budget; + /* This loop will iterate twice for EQ0 in which + * completions of the last RXQ (default one) are also processed + * For other EQs the loop iterates only once + */ + for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { + work = be_process_rx(rxo, napi, budget); + max_work = max(work, max_work); } if (is_mcc_eqo(eqo)) be_process_mcc(adapter); if (max_work < budget) { - napi_complete(napi); + napi_complete_done(napi, max_work); /* Skyhawk EQ_DB has a provision to set the rearm to interrupt * delay via a delay multiplier encoding value @@ -3344,28 +3237,6 @@ int be_poll(struct napi_struct *napi, int budget) return max_work; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int be_busy_poll(struct napi_struct *napi) -{ - struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); - struct be_adapter *adapter = eqo->adapter; - struct be_rx_obj *rxo; - int i, work = 0; - - if (!be_lock_busy_poll(eqo)) - return LL_FLUSH_BUSY; - - for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { - work = be_process_rx(rxo, napi, 4, BUSY_POLLING); - if (work) - break; - } - - be_unlock_busy_poll(eqo); - return work; -} -#endif - void be_detect_error(struct be_adapter *adapter) { u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; @@ -3670,7 +3541,6 @@ static int be_close(struct net_device *netdev) if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { for_all_evt_queues(adapter, eqo, i) { napi_disable(&eqo->napi); - be_disable_busy_poll(eqo); } adapter->flags &= ~BE_FLAGS_NAPI_ENABLED; } @@ -3840,7 +3710,6 @@ static int be_open(struct net_device *netdev) for_all_evt_queues(adapter, eqo, i) { napi_enable(&eqo->napi); - be_enable_busy_poll(eqo); be_eq_notify(adapter, eqo->q.id, true, true, 0, 0); } adapter->flags |= BE_FLAGS_NAPI_ENABLED; @@ -5246,9 +5115,6 @@ static const struct net_device_ops be_netdev_ops = { #endif .ndo_bridge_setlink = be_ndo_bridge_setlink, .ndo_bridge_getlink = be_ndo_bridge_getlink, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = 
be_busy_poll, -#endif .ndo_udp_tunnel_add = be_add_vxlan_port, .ndo_udp_tunnel_del = be_del_vxlan_port, .ndo_features_check = be_features_check, diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 45abc81f6f55..23d82748f52b 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -180,8 +180,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region - * @dma_alloc: dma allocated buffer size - * @io_region_size: I/O memory region size * @num_bd: number of buffer descriptors * @num_tx: number of send buffers * @cur_tx: last send buffer written @@ -199,8 +197,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); struct ethoc { void __iomem *iobase; void __iomem *membase; - int dma_alloc; - resource_size_t io_region_size; bool big_endian; unsigned int num_bd; @@ -618,7 +614,7 @@ static int ethoc_poll(struct napi_struct *napi, int budget) tx_work_done = ethoc_tx(priv->netdev, budget); if (rx_work_done < budget && tx_work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); } @@ -999,7 +995,7 @@ static int ethoc_set_ringparam(struct net_device *dev, return 0; } -const struct ethtool_ops ethoc_ethtool_ops = { +static const struct ethtool_ops ethoc_ethtool_ops = { .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .nway_reset = phy_ethtool_nway_reset, @@ -1035,7 +1031,6 @@ static int ethoc_probe(struct platform_device *pdev) struct ethoc *priv = NULL; int num_bd; int ret = 0; - bool random_mac = false; struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev); u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0; @@ -1096,8 +1091,6 @@ static int ethoc_probe(struct platform_device *pdev) /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; - priv->dma_alloc = 0; - priv->io_region_size = resource_size(mmio); priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, resource_size(mmio)); @@ -1127,7 +1120,6 @@ static int ethoc_probe(struct platform_device *pdev) goto free; } netdev->mem_end = netdev->mem_start + buffer_size; - priv->dma_alloc = buffer_size; } priv->big_endian = pdata ? pdata->big_endian : @@ -1176,16 +1168,11 @@ static int ethoc_probe(struct platform_device *pdev) /* Check the MAC again for validity, if it still isn't choose and * program a random one. */ - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_random_addr(netdev->dev_addr); - random_mac = true; - } + if (!is_valid_ether_addr(netdev->dev_addr)) + eth_hw_addr_random(netdev); ethoc_do_set_mac_address(netdev); - if (random_mac) - netdev->addr_assign_type = NET_ADDR_RANDOM; - /* Allow the platform setup code to adjust MII management bus clock. 
*/ if (!eth_clkfreq) { struct clk *clk = devm_clk_get(&pdev->dev, NULL); diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 223f35cc034c..992ebe973d25 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) if (work_done < budget) { u32 buf_int_enable_value = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); /* set tx_done and rx_rdy bits */ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 262587240c86..928b0df2b8e0 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1456,7 +1456,7 @@ err_alloc_etherdev: return err; } -static int __exit ftgmac100_remove(struct platform_device *pdev) +static int ftgmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftgmac100 *priv; @@ -1483,7 +1483,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match); static struct platform_driver ftgmac100_driver = { .probe = ftgmac100_probe, - .remove = __exit_p(ftgmac100_remove), + .remove = ftgmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftgmac100_of_match, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index dce5f7b7f772..6ac336b546e6 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -825,16 +825,18 @@ static void ftmac100_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } -static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ftmac100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ftmac100 *priv = netdev_priv(netdev); - return mii_ethtool_gset(&priv->mii, cmd); + return mii_ethtool_get_link_ksettings(&priv->mii, cmd); } -static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ftmac100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ftmac100 *priv = netdev_priv(netdev); - return mii_ethtool_sset(&priv->mii, cmd); + return mii_ethtool_set_link_ksettings(&priv->mii, cmd); } static int ftmac100_nway_reset(struct net_device *netdev) @@ -850,11 +852,11 @@ static u32 ftmac100_get_link(struct net_device *netdev) } static const struct ethtool_ops ftmac100_ethtool_ops = { - .set_settings = ftmac100_set_settings, - .get_settings = ftmac100_get_settings, .get_drvinfo = ftmac100_get_drvinfo, .nway_reset = ftmac100_nway_reset, .get_link = ftmac100_get_link, + .get_link_ksettings = ftmac100_get_link_ksettings, + .set_link_ksettings = ftmac100_set_link_ksettings, }; /****************************************************************************** @@ -1154,7 +1156,7 @@ err_alloc_etherdev: return err; } -static int __exit ftmac100_remove(struct platform_device *pdev) +static int ftmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftmac100 *priv; @@ -1174,7 +1176,7 @@ static int __exit ftmac100_remove(struct platform_device *pdev) static struct platform_driver ftmac100_driver = { .probe = ftmac100_probe, - .remove = __exit_p(ftmac100_remove), + .remove = ftmac100_remove, .driver = { .name = DRV_NAME, }, diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 
9cb436cb3745..766636a7c25e 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1817,25 +1817,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii, cmd); + rc = mii_ethtool_get_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii, cmd); + rc = mii_ethtool_set_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; @@ -1865,12 +1867,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 726b5693ae8a..e2ca107f9d94 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -313,8 +313,8 @@ static void dpaa_tx_timeout(struct net_device *net_dev) /* Calculates the statistics for the given device by adding the statistics * collected by each CPU. 
*/ -static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev, - struct rtnl_link_stats64 *s) +static void dpaa_get_stats64(struct net_device *net_dev, + struct rtnl_link_stats64 *s) { int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); struct dpaa_priv *priv = netdev_priv(net_dev); @@ -332,8 +332,6 @@ static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev, for (j = 0; j < numstats; j++) netstats[j] += cpustats[j]; } - - return s; } static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) @@ -2003,7 +2001,7 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget) int cleaned = qman_p_poll_dqrr(np->p, budget); if (cleaned < budget) { - napi_complete(napi); + napi_complete_done(napi, cleaned); qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); } else if (np->down) { @@ -2335,6 +2333,13 @@ static int dpaa_eth_stop(struct net_device *net_dev) return err; } +static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) +{ + if (!net_dev->phydev) + return -EINVAL; + return phy_mii_ioctl(net_dev->phydev, rq, cmd); +} + static const struct net_device_ops dpaa_ops = { .ndo_open = dpaa_open, .ndo_start_xmit = dpaa_start_xmit, @@ -2344,6 +2349,7 @@ static const struct net_device_ops dpaa_ops = { .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, + .ndo_do_ioctl = dpaa_ioctl, }; static int dpaa_napi_add(struct net_device *net_dev) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 27e7044667d1..15571e251fb9 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -72,8 +72,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = { #define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu) #define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global) -static int dpaa_get_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int dpaa_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { int err; @@ -82,13 +82,13 @@ static int dpaa_get_settings(struct net_device *net_dev, return 0; } - err = phy_ethtool_gset(net_dev->phydev, et_cmd); + err = phy_ethtool_ksettings_get(net_dev->phydev, cmd); return err; } -static int dpaa_set_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int dpaa_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { int err; @@ -97,9 +97,9 @@ static int dpaa_set_settings(struct net_device *net_dev, return -ENODEV; } - err = phy_ethtool_sset(net_dev->phydev, et_cmd); + err = phy_ethtool_ksettings_set(net_dev->phydev, cmd); if (err < 0) - netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err); + netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err); return err; } @@ -402,8 +402,6 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset, } const struct ethtool_ops dpaa_ethtool_ops = { - .get_settings = dpaa_get_settings, - .set_settings = dpaa_set_settings, .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, .set_msglevel = dpaa_set_msglevel, @@ -414,4 +412,6 @@ const struct ethtool_ops dpaa_ethtool_ops = { .get_sset_count = dpaa_get_sset_count, .get_ethtool_stats = dpaa_get_ethtool_stats, .get_strings = dpaa_get_strings, + .get_link_ksettings = dpaa_get_link_ksettings, + .set_link_ksettings = dpaa_set_link_ksettings, }; diff --git 
a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 8be7034b2e7b..91a16641e851 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) fec_enet_tx(ndev); if (pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, pkts); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } return pkts; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index c88918c4c5f3..84ea130eed36 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -337,7 +337,7 @@ struct fman_mac { u8 mac_id; u32 exceptions; bool ptp_tsu_enabled; - bool en_tsu_err_exeption; + bool en_tsu_err_exception; struct dtsec_cfg *dtsec_drv_param; void *fm; struct fman_rev_info fm_rev_info; @@ -1247,12 +1247,12 @@ int dtsec_set_exception(struct fman_mac *dtsec, switch (exception) { case FM_MAC_EX_1G_1588_TS_RX_ERR: if (enable) { - dtsec->en_tsu_err_exeption = true; + dtsec->en_tsu_err_exception = true; iowrite32be(ioread32be(&regs->tmr_pemask) | TMR_PEMASK_TSREEN, &regs->tmr_pemask); } else { - dtsec->en_tsu_err_exeption = false; + dtsec->en_tsu_err_exception = false; iowrite32be(ioread32be(&regs->tmr_pemask) & ~TMR_PEMASK_TSREEN, &regs->tmr_pemask); @@ -1420,7 +1420,7 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params) dtsec->event_cb = params->event_cb; dtsec->dev_id = params->dev_id; dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en; - dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en; + dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en; dtsec->fm = params->fm; dtsec->basex_if = params->basex_if; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 71a5ded9d1de..cd6a53eaf161 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -38,6 +38,7 @@ #include <linux/slab.h> #include <linux/io.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/of_mdio.h> /* PCS registers */ diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 1f98838f32b7..753259091b22 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (received < budget && tx_left) { /* done */ - napi_complete(napi); + napi_complete_done(napi, received); (*fep->ops->napi_enable)(dev); return received; @@ -964,11 +964,10 @@ static int fs_enet_probe(struct platform_device *ofdev) */ clk = devm_clk_get(&ofdev->dev, "per"); if (!IS_ERR(clk)) { - err = clk_prepare_enable(clk); - if (err) { - ret = err; + ret = clk_prepare_enable(clk); + if (ret) goto out_deregister_fixed_link; - } + fpi->clk_per = clk; } @@ -1045,10 +1044,10 @@ out_cleanup_data: out_free_dev: free_netdev(ndev); out_put: - of_node_put(fpi->phy_node); if (fpi->clk_per) clk_disable_unprepare(fpi->clk_per); out_deregister_fixed_link: + of_node_put(fpi->phy_node); if (of_phy_is_fixed_link(ofdev->dev.of_node)) of_phy_deregister_fixed_link(ofdev->dev.of_node); out_free_fpi: diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 957bfc220978..0ff166ec3e7e
100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3183,7 +3183,7 @@ static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) if (work_done < budget) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(&regs->rstat, gfargrp->rstat); @@ -3272,7 +3272,7 @@ static int gfar_poll_rx(struct napi_struct *napi, int budget) if (!num_act_queues) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(&regs->rstat, gfargrp->rstat); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 9d660888510f..3f7ae9f64cd8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) howmany += ucc_geth_rx(ugeth, i, budget - howmany); if (howmany < budget) { - napi_complete(napi); + napi_complete_done(napi, howmany); setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 97b184774784..0cec06bec63e 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -555,7 +555,7 @@ refill: priv->reg_inten |= RCV_INT; writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); } - napi_complete(napi); + napi_complete_done(napi, rx); done: /* clean up tx descriptors and start a new timer if necessary */ tx_remaining = hip04_tx_reclaim(ndev, false); @@ -701,11 +701,6 @@ static void hip04_tx_timeout_task(struct work_struct *work) hip04_mac_open(priv->ndev); } -static struct net_device_stats *hip04_get_stats(struct net_device *ndev) -{ - return &ndev->stats; -} - static int hip04_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -764,7 +759,6 @@ static const struct ethtool_ops hip04_ethtool_ops = { static const struct net_device_ops hip04_netdev_ops = { .ndo_open = hip04_mac_open, .ndo_stop = hip04_mac_stop, - .ndo_get_stats = hip04_get_stats, .ndo_start_xmit = hip04_mac_start_xmit, .ndo_set_mac_address = hip04_set_mac_address, .ndo_tx_timeout = hip04_timeout, diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 979852d56f31..2c2808830e95 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hisi_femac_irq_enable(priv, DEF_INT_MASK & (~IRQ_INT_TX_PER_PACKET)); } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 418ca1f3774a..25a6c8722eca 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -662,7 +662,7 @@ static int hix5hd2_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hix5hd2_irq_enable(priv); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 8aed72860e7c..fca37e2c7f01 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ 
b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -797,7 +797,6 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, skb->protocol = eth_type_trans(skb, ndev); (void)napi_gro_receive(&ring_data->napi, skb); - ndev->last_rx = jiffies; } static int hns_desc_unused(struct hnae_ring *ring) @@ -1203,43 +1202,48 @@ static void hns_set_irq_affinity(struct hns_nic_priv *priv) struct hns_nic_ring_data *rd; int i; int cpu; - cpumask_t mask; + cpumask_var_t mask; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return; /*diffrent irq banlance for 16core and 32core*/ if (h->q_num == num_possible_cpus()) { for (i = 0; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index)) { - cpumask_clear(&mask); + cpumask_clear(mask); cpu = rd->queue_index; - cpumask_set_cpu(cpu, &mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } } else { for (i = 0; i < h->q_num; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index * 2)) { - cpumask_clear(&mask); + cpumask_clear(mask); cpu = rd->queue_index * 2; - cpumask_set_cpu(cpu, &mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } for (i = h->q_num; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index * 2 + 1)) { - cpumask_clear(&mask); + cpumask_clear(mask); cpu = rd->queue_index * 2 + 1; - cpumask_set_cpu(cpu, &mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } } + + free_cpumask_var(mask); } static int hns_nic_init_irq(struct hns_nic_priv *priv) @@ -1625,8 +1629,8 @@ void hns_nic_set_rx_mode(struct net_device *ndev) netdev_err(ndev, "sync uc address fail\n"); } -struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) +static void hns_nic_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) { int idx = 0; u64 tx_bytes = 0; @@ -1668,8 +1672,6 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, stats->tx_window_errors = ndev->stats.tx_window_errors; stats->rx_compressed = ndev->stats.rx_compressed; stats->tx_compressed = ndev->stats.tx_compressed; - - return stats; } static u16 diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c index 85a3866459cf..4f58d338d739 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c @@ -31,9 +31,11 @@ #include "ehea.h" #include "ehea_phyp.h" -static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ehea_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct ehea_port *port = netdev_priv(dev); + u32 supported, advertising; u32 speed; int ret; @@ -60,68 +62,75 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = -1; break; /* BUG */ } - cmd->duplex = port->full_duplex == 1 ? + cmd->base.duplex = port->full_duplex == 1 ? 
DUPLEX_FULL : DUPLEX_HALF; } else { speed = SPEED_UNKNOWN; - cmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_cmd_speed_set(cmd, speed); + cmd->base.speed = speed; - if (cmd->speed == SPEED_10000) { - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - cmd->port = PORT_FIBRE; + if (cmd->base.speed == SPEED_10000) { + supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + cmd->base.port = PORT_FIBRE; } else { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg | SUPPORTED_TP); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_TP); - cmd->port = PORT_TP; + cmd->base.port = PORT_TP; } - cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->base.autoneg = port->autoneg == 1 ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ehea_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct ehea_port *port = netdev_priv(dev); int ret = 0; u32 sp; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { sp = EHEA_SPEED_AUTONEG; goto doit; } - switch (cmd->speed) { + switch (cmd->base.speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_10M_F; else sp = H_SPEED_10M_H; break; case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_100M_F; else sp = H_SPEED_100M_H; break; case SPEED_1000: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_1G_F; else ret = -EINVAL; break; case SPEED_10000: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_10G_F; else ret = -EINVAL; @@ -264,7 +273,6 @@ static void ehea_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops ehea_ethtool_ops = { - .get_settings = ehea_get_settings, .get_drvinfo = ehea_get_drvinfo, .get_msglevel = ehea_get_msglevel, .set_msglevel = ehea_set_msglevel, @@ -272,8 +280,9 @@ static const struct ethtool_ops ehea_ethtool_ops = { .get_strings = ehea_get_strings, .get_sset_count = ehea_get_sset_count, .get_ethtool_stats = ehea_get_ethtool_stats, - .set_settings = ehea_set_settings, .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ + .get_link_ksettings = ehea_get_link_ksettings, + .set_link_ksettings = ehea_set_link_ksettings, }; void ehea_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 702446a93697..1e53d7a82675 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -328,8 +328,8 @@ out: spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags); } -static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void ehea_get_stats64(struct net_device 
*dev, + struct rtnl_link_stats64 *stats) { struct ehea_port *port = netdev_priv(dev); u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0; @@ -352,7 +352,6 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev, stats->multicast = port->stats.multicast; stats->rx_errors = port->stats.rx_errors; - return stats; } static void ehea_update_stats(struct work_struct *work) diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig index 3f44a30e0615..90d49191beb3 100644 --- a/drivers/net/ethernet/ibm/emac/Kconfig +++ b/drivers/net/ethernet/ibm/emac/Kconfig @@ -2,6 +2,7 @@ config IBM_EMAC tristate "IBM EMAC Ethernet support" depends on PPC_DCR select CRC32 + select PHYLIB help This driver supports the IBM EMAC family of Ethernet controllers typically found on 4xx embedded PowerPC chips, but also on the diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5909615c27f7..275c2e2349ad 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -42,6 +42,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_net.h> +#include <linux/of_mdio.h> #include <linux/slab.h> #include <asm/processor.h> @@ -1991,69 +1992,79 @@ static struct mal_commac_ops emac_commac_sg_ops = { }; /* Ethtool support */ -static int emac_ethtool_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int emac_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct emac_instance *dev = netdev_priv(ndev); + u32 supported, advertising; - cmd->supported = dev->phy.features; - cmd->port = PORT_MII; - cmd->phy_address = dev->phy.address; - cmd->transceiver = - dev->phy.address >= 0 ? 
XCVR_EXTERNAL : XCVR_INTERNAL; + supported = dev->phy.features; + cmd->base.port = PORT_MII; + cmd->base.phy_address = dev->phy.address; mutex_lock(&dev->link_lock); - cmd->advertising = dev->phy.advertising; - cmd->autoneg = dev->phy.autoneg; - cmd->speed = dev->phy.speed; - cmd->duplex = dev->phy.duplex; + advertising = dev->phy.advertising; + cmd->base.autoneg = dev->phy.autoneg; + cmd->base.speed = dev->phy.speed; + cmd->base.duplex = dev->phy.duplex; mutex_unlock(&dev->link_lock); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int emac_ethtool_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int +emac_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct emac_instance *dev = netdev_priv(ndev); u32 f = dev->phy.features; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL, - cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); + cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising); /* Basic sanity checks */ if (dev->phy.address < 0) return -EOPNOTSUPP; - if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) + if (cmd->base.autoneg != AUTONEG_ENABLE && + cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) + if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0) return -EINVAL; - if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) + if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL) return -EINVAL; - if (cmd->autoneg == AUTONEG_DISABLE) { - switch (cmd->speed) { + if (cmd->base.autoneg == AUTONEG_DISABLE) { + switch (cmd->base.speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_10baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_10baseT_Full)) return -EINVAL; break; case SPEED_100: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_100baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_100baseT_Full)) return -EINVAL; break; case SPEED_1000: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_1000baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_1000baseT_Full)) return -EINVAL; break; @@ -2062,8 +2073,8 @@ static int emac_ethtool_set_settings(struct net_device *ndev, } mutex_lock(&dev->link_lock); - dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed, - cmd->duplex); + dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed, + cmd->base.duplex); mutex_unlock(&dev->link_lock); } else { @@ -2072,7 +2083,7 @@ static int emac_ethtool_set_settings(struct net_device *ndev, mutex_lock(&dev->link_lock); dev->phy.def->ops->setup_aneg(&dev->phy, - (cmd->advertising & f) | + (advertising & f) | (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause))); @@ -2234,8 +2245,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, } static const struct ethtool_ops emac_ethtool_ops = { - .get_settings = emac_ethtool_get_settings, - .set_settings = 
emac_ethtool_set_settings, .get_drvinfo = emac_ethtool_get_drvinfo, .get_regs_len = emac_ethtool_get_regs_len, @@ -2251,6 +2260,8 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_ethtool_stats = emac_ethtool_get_ethtool_stats, .get_link = ethtool_op_get_link, + .get_link_ksettings = emac_ethtool_get_link_ksettings, + .set_link_ksettings = emac_ethtool_set_link_ksettings, }; static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -2410,6 +2421,219 @@ static int emac_read_uint_prop(struct device_node *np, const char *name, return 0; } +static void emac_adjust_link(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct phy_device *phy = dev->phy_dev; + + dev->phy.autoneg = phy->autoneg; + dev->phy.speed = phy->speed; + dev->phy.duplex = phy->duplex; + dev->phy.pause = phy->pause; + dev->phy.asym_pause = phy->asym_pause; + dev->phy.advertising = phy->advertising; +} + +static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum) +{ + int ret = emac_mdio_read(bus->priv, addr, regnum); + /* This is a workaround for powered down ports/phys. + * In the wild, this was seen on the Cisco Meraki MX60(W). + * This hardware disables ports as part of the handoff + * procedure. Accessing the ports will lead to errors + * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good. + */ + return ret < 0 ? 0xffff : ret; +} + +static int emac_mii_bus_write(struct mii_bus *bus, int addr, + int regnum, u16 val) +{ + emac_mdio_write(bus->priv, addr, regnum, val); + return 0; +} + +static int emac_mii_bus_reset(struct mii_bus *bus) +{ + struct emac_instance *dev = netdev_priv(bus->priv); + + return emac_reset(dev); +} + +static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) +{ + struct net_device *ndev = phy->dev; + struct emac_instance *dev = netdev_priv(ndev); + + dev->phy.autoneg = AUTONEG_ENABLE; + dev->phy.speed = SPEED_1000; + dev->phy.duplex = DUPLEX_FULL; + dev->phy.advertising = advertise; + phy->autoneg = AUTONEG_ENABLE; + phy->speed = dev->phy.speed; + phy->duplex = dev->phy.duplex; + phy->advertising = advertise; + return phy_start_aneg(dev->phy_dev); +} + +static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) +{ + struct net_device *ndev = phy->dev; + struct emac_instance *dev = netdev_priv(ndev); + + dev->phy.autoneg = AUTONEG_DISABLE; + dev->phy.speed = speed; + dev->phy.duplex = fd; + phy->autoneg = AUTONEG_DISABLE; + phy->speed = speed; + phy->duplex = fd; + return phy_start_aneg(dev->phy_dev); +} + +static int emac_mdio_poll_link(struct mii_phy *phy) +{ + struct net_device *ndev = phy->dev; + struct emac_instance *dev = netdev_priv(ndev); + int res; + + res = phy_read_status(dev->phy_dev); + if (res) { + dev_err(&dev->ofdev->dev, "link update failed (%d).", res); + return ethtool_op_get_link(ndev); + } + + return dev->phy_dev->link; +} + +static int emac_mdio_read_link(struct mii_phy *phy) +{ + struct net_device *ndev = phy->dev; + struct emac_instance *dev = netdev_priv(ndev); + int res; + + res = phy_read_status(dev->phy_dev); + if (res) + return res; + + dev->phy.speed = phy->speed; + dev->phy.duplex = phy->duplex; + dev->phy.pause = phy->pause; + dev->phy.asym_pause = phy->asym_pause; + return 0; +} + +static int emac_mdio_init_phy(struct mii_phy *phy) +{ + struct net_device *ndev = phy->dev; + struct emac_instance *dev = netdev_priv(ndev); + + phy_start(dev->phy_dev); + dev->phy.autoneg = phy->autoneg; + dev->phy.speed = phy->speed; + dev->phy.duplex = phy->duplex; + dev->phy.advertising = 
phy->advertising; + dev->phy.pause = phy->pause; + dev->phy.asym_pause = phy->asym_pause; + + return phy_init_hw(dev->phy_dev); +} + +static const struct mii_phy_ops emac_dt_mdio_phy_ops = { + .init = emac_mdio_init_phy, + .setup_aneg = emac_mdio_setup_aneg, + .setup_forced = emac_mdio_setup_forced, + .poll_link = emac_mdio_poll_link, + .read_link = emac_mdio_read_link, +}; + +static int emac_dt_mdio_probe(struct emac_instance *dev) +{ + struct device_node *mii_np; + int res; + + mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio"); + if (!mii_np) { + dev_err(&dev->ofdev->dev, "no mdio definition found."); + return -ENODEV; + } + + if (!of_device_is_available(mii_np)) { + res = -ENODEV; + goto put_node; + } + + dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev); + if (!dev->mii_bus) { + res = -ENOMEM; + goto put_node; + } + + dev->mii_bus->priv = dev->ndev; + dev->mii_bus->parent = dev->ndev->dev.parent; + dev->mii_bus->name = "emac_mdio"; + dev->mii_bus->read = &emac_mii_bus_read; + dev->mii_bus->write = &emac_mii_bus_write; + dev->mii_bus->reset = &emac_mii_bus_reset; + snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name); + res = of_mdiobus_register(dev->mii_bus, mii_np); + if (res) { + dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)", + dev->mii_bus->name, res); + } + + put_node: + of_node_put(mii_np); + return res; +} + +static int emac_dt_phy_connect(struct emac_instance *dev, + struct device_node *phy_handle) +{ + int res; + + dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def), + GFP_KERNEL); + if (!dev->phy.def) + return -ENOMEM; + + dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link, + 0, dev->phy_mode); + if (!dev->phy_dev) { + dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n"); + return -ENODEV; + } + + dev->phy.def->phy_id = dev->phy_dev->drv->phy_id; + dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask; + dev->phy.def->name = dev->phy_dev->drv->name; + dev->phy.def->ops = &emac_dt_mdio_phy_ops; + dev->phy.features = dev->phy_dev->supported; + dev->phy.address = dev->phy_dev->mdio.addr; + dev->phy.mode = dev->phy_dev->interface; + return 0; +} + +static int emac_dt_phy_probe(struct emac_instance *dev) +{ + struct device_node *np = dev->ofdev->dev.of_node; + struct device_node *phy_handle; + int res = 0; + + phy_handle = of_parse_phandle(np, "phy-handle", 0); + + if (phy_handle) { + res = emac_dt_mdio_probe(dev); + if (!res) { + res = emac_dt_phy_connect(dev, phy_handle); + if (res) + mdiobus_unregister(dev->mii_bus); + } + } + + of_node_put(phy_handle); + return res; +} + static int emac_init_phy(struct emac_instance *dev) { struct device_node *np = dev->ofdev->dev.of_node; @@ -2420,15 +2644,12 @@ static int emac_init_phy(struct emac_instance *dev) dev->phy.dev = ndev; dev->phy.mode = dev->phy_mode; - /* PHY-less configuration. - * XXX I probably should move these settings to the dev tree - */ - if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) { + /* PHY-less configuration. */ + if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) || + of_phy_is_fixed_link(np)) { emac_reset(dev); - /* PHY-less configuration. - * XXX I probably should move these settings to the dev tree - */ + /* PHY-less configuration. 
*/ dev->phy.address = -1; dev->phy.features = SUPPORTED_MII; if (emac_phy_supports_gige(dev->phy_mode)) @@ -2437,6 +2658,16 @@ static int emac_init_phy(struct emac_instance *dev) dev->phy.features |= SUPPORTED_100baseT_Full; dev->phy.pause = 1; + if (of_phy_is_fixed_link(np)) { + int res = emac_dt_mdio_probe(dev); + + if (!res) { + res = of_phy_register_fixed_link(np); + if (res) + mdiobus_unregister(dev->mii_bus); + } + return res; + } return 0; } @@ -2480,6 +2711,18 @@ static int emac_init_phy(struct emac_instance *dev) emac_configure(dev); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) { + int res = emac_dt_phy_probe(dev); + + mutex_unlock(&emac_phy_map_lock); + if (!res) + goto init_phy; + + dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n", + res); + return res; + } + if (dev->phy_address != 0xffffffff) phy_map = ~(1 << dev->phy_address); @@ -2507,6 +2750,7 @@ static int emac_init_phy(struct emac_instance *dev) return -ENXIO; } + init_phy: /* Init PHY */ if (dev->phy.def->ops->init) dev->phy.def->ops->init(&dev->phy); @@ -2978,6 +3222,12 @@ static int emac_remove(struct platform_device *ofdev) if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) zmii_detach(dev->zmii_dev, dev->zmii_port); + if (dev->phy_dev) + phy_disconnect(dev->phy_dev); + + if (dev->mii_bus) + mdiobus_unregister(dev->mii_bus); + busy_phy_map &= ~(1 << dev->phy.address); DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 93ae11494810..0710a6685489 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -199,6 +199,10 @@ struct emac_instance { struct emac_instance *mdio_instance; struct mutex mdio_lock; + /* Device-tree based phy configuration */ + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + /* ZMII infos if any */ u32 zmii_ph; u32 zmii_port; diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index aaf6fec566b5..cd3227b088b7 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget) int n; if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) continue; - n = mc->ops->poll_rx(mc->dev, budget); + n = mc->ops->poll_rx(mc->dev, budget - received); if (n) { received += n; - budget -= n; - if (budget <= 0) - goto more_work; // XXX What if this is the last one ? 
+ if (received >= budget) + return budget; } } - /* We need to disable IRQs to protect from RXDE IRQ here */ - spin_lock_irqsave(&mal->lock, flags); - __napi_complete(napi); - mal_enable_eob_irq(mal); - spin_unlock_irqrestore(&mal->lock, flags); + if (napi_complete_done(napi, received)) { + /* We need to disable IRQs to protect from RXDE IRQ here */ + spin_lock_irqsave(&mal->lock, flags); + mal_enable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + } /* Check for "rotting" packet(s) */ list_for_each(l, &mal->poll_list) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 309f5c66083c..72ab7b6bf20b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -729,20 +729,26 @@ static int ibmveth_close(struct net_device *netdev) return 0; } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + u32 supported, advertising; + + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_FIBRE; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_ENABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 1; + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_ENABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -978,11 +984,11 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev, static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, .get_link = ethtool_op_get_link, .get_strings = ibmveth_get_strings, .get_sset_count = ibmveth_get_sset_count, .get_ethtool_stats = ibmveth_get_ethtool_stats, + .get_link_ksettings = netdev_get_link_ksettings, }; static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -1320,7 +1326,7 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { - napi_complete(napi); + napi_complete_done(napi, frames_processed); /* We think we are done - reenable interrupts, * then check once more to make sure we are done. 
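The change repeated in nearly every driver touched by this diff replaces napi_complete() with napi_complete_done(), passing the number of packets actually processed so the core can drive gro_flush_timeout and busy polling from the real per-poll work count. A minimal poll handler following that pattern might look like the sketch below; the foo_* names and struct layout are placeholders, not taken from any driver in this series.

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* device/ring state would live here */
};

static int foo_clean_rx_ring(struct foo_priv *priv, int budget);	/* returns packets processed, <= budget */
static void foo_enable_rx_irq(struct foo_priv *priv);			/* re-arms the device interrupt */

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_clean_rx_ring(priv, budget);

	if (work_done < budget) {
		/* report the actual work count instead of plain napi_complete() */
		napi_complete_done(napi, work_done);
		foo_enable_rx_irq(priv);
	}
	return work_done;
}
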
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a07b8d79174c..9198e6bd5160 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -988,7 +988,7 @@ restart_poll: if (frames_processed < budget) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); - napi_complete(napi); + napi_complete_done(napi, frames_processed); if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && napi_reschedule(napi)) { disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); @@ -1026,21 +1026,26 @@ static const struct net_device_ops ibmvnic_netdev_ops = { /* ethtool functions */ -static int ibmvnic_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int ibmvnic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + u32 supported, advertising; + + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_FIBRE; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_ENABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 1; + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_ENABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -1133,7 +1138,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops ibmvnic_ethtool_ops = { - .get_settings = ibmvnic_get_settings, .get_drvinfo = ibmvnic_get_drvinfo, .get_msglevel = ibmvnic_get_msglevel, .set_msglevel = ibmvnic_set_msglevel, @@ -1142,6 +1146,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_strings = ibmvnic_get_strings, .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, + .get_link_ksettings = ibmvnic_get_link_ksettings, }; /* Routines for managing CRQs/sCRQs */ @@ -1255,8 +1260,6 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) } adapter->rx_scrq = NULL; } - - adapter->requested_caps = 0; } static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter) @@ -1278,8 +1281,6 @@ static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter) adapter->rx_scrq[i]); adapter->rx_scrq = NULL; } - - adapter->requested_caps = 0; } static int disable_scrq_irq(struct ibmvnic_adapter *adapter, @@ -1567,30 +1568,36 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); 
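/* The other recurring conversion in this series (fealnx, dpaa, ehea, emac,
 * ibmveth, e100 and the ibmvnic ethtool ops earlier in this file's diff)
 * drops the legacy get_settings/set_settings callbacks on struct ethtool_cmd
 * in favour of get_link_ksettings/set_link_ksettings on struct
 * ethtool_link_ksettings. A hedged sketch of the new getter shape follows;
 * the foo_* name and the advertised modes are illustrative only.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;

	/* speed, duplex, port and autoneg now live under cmd->base */
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* legacy u32 masks are converted into link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	return 0;
}
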
crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_tx_entries_per_subcrq); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); crq.request_capability.number = cpu_to_be64(adapter->req_mtu); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { @@ -1598,12 +1605,14 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(1); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(0); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } @@ -1955,112 +1964,112 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter) { union ibmvnic_crq crq; - atomic_set(&adapter->running_cap_queries, 0); + atomic_set(&adapter->running_cap_crqs, 0); memset(&crq, 0, sizeof(crq)); crq.query_capability.first = IBMVNIC_CRQ_CMD; crq.query_capability.cmd = QUERY_CAPABILITY; crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); - atomic_inc(&adapter->running_cap_queries); + 
atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_MTU); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_MTU); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } @@ -2348,6 +2357,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, u64 *req_value; char *name; + atomic_dec(&adapter->running_cap_crqs); switch (be16_to_cpu(crq->request_capability_rsp.capability)) { case REQ_TX_QUEUES: req_value = &adapter->req_tx_queues; @@ -2402,12 +2412,13 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, } /* Done receiving requested capabilities, query IP offload support */ - if (++adapter->requested_caps == 7) { + if (atomic_read(&adapter->running_cap_crqs) == 0) { union ibmvnic_crq newcrq; int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = &adapter->ip_offload_buf; + adapter->wait_capability = false; adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, buf_sz, DMA_FROM_DEVICE); @@ -2543,9 +2554,9 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, struct device *dev = &adapter->vdev->dev; long rc; - atomic_dec(&adapter->running_cap_queries); + atomic_dec(&adapter->running_cap_crqs); 
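/* The bookkeeping being introduced around these capability CRQs: every
 * request/query bumps running_cap_crqs before ibmvnic_send_crq(), each
 * response handler decrements it, and completion is detected by the counter
 * reaching zero instead of a hard-coded count of expected responses.
 * Roughly (sketch only, condensed from the surrounding hunks):
 *
 *	atomic_inc(&adapter->running_cap_crqs);	// before each capability CRQ
 *	ibmvnic_send_crq(adapter, &crq);
 *
 *	// in the response handler:
 *	atomic_dec(&adapter->running_cap_crqs);
 *	if (atomic_read(&adapter->running_cap_crqs) == 0)
 *		adapter->wait_capability = false;	// all responses received
 */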
netdev_dbg(netdev, "Outstanding queries: %d\n", - atomic_read(&adapter->running_cap_queries)); + atomic_read(&adapter->running_cap_crqs)); rc = crq->query_capability.rc.code; if (rc) { dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); @@ -2703,9 +2714,11 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, } out: - if (atomic_read(&adapter->running_cap_queries) == 0) + if (atomic_read(&adapter->running_cap_crqs) == 0) { + adapter->wait_capability = false; init_sub_crqs(adapter, 0); /* We're done querying the capabilities, initialize sub-crqs */ + } } static void handle_control_ras_rsp(union ibmvnic_crq *crq, @@ -3415,6 +3428,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, static irqreturn_t ibmvnic_interrupt(int irq, void *instance) { struct ibmvnic_adapter *adapter = instance; + unsigned long flags; + + spin_lock_irqsave(&adapter->crq.lock, flags); + vio_disable_interrupts(adapter->vdev); + tasklet_schedule(&adapter->tasklet); + spin_unlock_irqrestore(&adapter->crq.lock, flags); + return IRQ_HANDLED; +} + +static void ibmvnic_tasklet(void *data) +{ + struct ibmvnic_adapter *adapter = data; struct ibmvnic_crq_queue *queue = &adapter->crq; struct vio_dev *vdev = adapter->vdev; union ibmvnic_crq *crq; @@ -3436,11 +3461,19 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance) ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } else { - done = true; + /* remain in tasklet until all + * capabilities responses are received + */ + if (!adapter->wait_capability) + done = true; } } + /* if capabilities CRQ's were sent in this tasklet, the following + * tasklet must wait until all responses are received + */ + if (atomic_read(&adapter->running_cap_crqs) != 0) + adapter->wait_capability = true; spin_unlock_irqrestore(&queue->lock, flags); - return IRQ_HANDLED; } static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) @@ -3495,6 +3528,7 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter) netdev_dbg(adapter->netdev, "Releasing CRQ\n"); free_irq(vdev->irq, adapter); + tasklet_kill(&adapter->tasklet); do { rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); @@ -3540,6 +3574,9 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) retrc = 0; + tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet, + (unsigned long)adapter); + netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, adapter); @@ -3561,6 +3598,7 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) return retrc; req_irq_failed: + tasklet_kill(&adapter->tasklet); do { rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index dd775d951b73..422824f1f42a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -976,11 +976,11 @@ struct ibmvnic_adapter { dma_addr_t login_rsp_buf_token; int login_rsp_buf_sz; - atomic_t running_cap_queries; + atomic_t running_cap_crqs; + bool wait_capability; struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; - int requested_caps; bool renegotiate; /* rx structs */ @@ -1049,5 +1049,6 @@ struct ibmvnic_adapter { struct work_struct vnic_crq_init; struct work_struct ibmvnic_xport; + struct tasklet_struct tasklet; bool failover; }; diff --git a/drivers/net/ethernet/intel/e100.c 
b/drivers/net/ethernet/intel/e100.c index 25c6dfd500b4..2b7323d392dc 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); e100_enable_irq(nic); } @@ -2426,19 +2426,21 @@ err_clean_rx: #define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ -static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct nic *nic = netdev_priv(netdev); - return mii_ethtool_gset(&nic->mii, cmd); + return mii_ethtool_get_link_ksettings(&nic->mii, cmd); } -static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct nic *nic = netdev_priv(netdev); int err; mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); - err = mii_ethtool_sset(&nic->mii, cmd); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); e100_exec_cb(nic, NULL, e100_configure); return err; @@ -2741,8 +2743,6 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } static const struct ethtool_ops e100_ethtool_ops = { - .get_settings = e100_get_settings, - .set_settings = e100_set_settings, .get_drvinfo = e100_get_drvinfo, .get_regs_len = e100_get_regs_len, .get_regs = e100_get_regs, @@ -2763,6 +2763,8 @@ static const struct ethtool_ops e100_ethtool_ops = { .get_ethtool_stats = e100_get_ethtool_stats, .get_sset_count = e100_get_sset_count, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 879cca47b021..a29b12e80855 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e1000_ring *ring); int e1000e_setup_tx_resources(struct e1000_ring *ring); void e1000e_free_rx_resources(struct e1000_ring *ring); void e1000e_free_tx_resources(struct e1000_ring *ring); -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats); +void e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); void e1000e_get_hw_control(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index eccf1da9356b..2175cced402f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); 
+ pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ @@ -5920,12 +5920,11 @@ static void e1000_reset_task(struct work_struct *work) * * Returns the address of the device statistics structure. **/ -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +void e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct e1000_adapter *adapter = netdev_priv(netdev); - memset(stats, 0, sizeof(struct rtnl_link_stats64)); spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); /* Fill out the OS statistics structure */ @@ -5958,7 +5957,6 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, /* Tx Dropped needs to be maintained elsewhere */ spin_unlock(&adapter->stats64_lock); - return stats; } /** @@ -6276,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev) /* Quiesce the device without resetting the hardware */ e1000e_down(adapter, false); e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); } - e1000e_reset_interrupt_capability(adapter); /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 4d19e46f7c55..52b979443cde 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -260,9 +260,7 @@ struct fm10k_intfc { #define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0)) #define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1)) #define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2)) -#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3)) -#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4)) -#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5)) +#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3)) int xcast_mode; /* Tx fast path data */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index dd95ac4f4c64..62a6ad9b3eed 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -506,7 +506,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) goto out; /* if we somehow dropped the Tx enable we should reset */ - if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { + if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { ret_val = FM10K_ERR_RESET_REQUESTED; goto out; } @@ -523,8 +523,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) /* interface cannot receive traffic without logical ports */ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) { - if (hw->mac.ops.request_lport_map) - ret_val = hw->mac.ops.request_lport_map(hw); + if (mac->ops.request_lport_map) + ret_val = mac->ops.request_lport_map(hw); goto out; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 5241e0873397..0c84fef750f4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -148,7 +148,7 @@ enum { static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { }; -static void fm10k_add_stat_strings(char **p, const char *prefix, +static void fm10k_add_stat_strings(u8 **p, const char *prefix, const struct fm10k_stats stats[], const unsigned int size) { @@ -164,32 +164,31 @@ static void fm10k_add_stat_strings(char **p, const char *prefix, static void fm10k_get_stat_strings(struct net_device 
*dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); - char *p = (char *)data; unsigned int i; - fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats, FM10K_NETDEV_STATS_LEN); - fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats, FM10K_GLOBAL_STATS_LEN); - fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats, FM10K_MBX_STATS_LEN); if (interface->hw.mac.type != fm10k_mac_vf) - fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats, FM10K_PF_STATS_LEN); for (i = 0; i < interface->hw.mac.max_queues; i++) { char prefix[ETH_GSTRING_LEN]; snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i); - fm10k_add_stat_strings(&p, prefix, + fm10k_add_stat_strings(&data, prefix, fm10k_gstrings_queue_stats, FM10K_QUEUE_STATS_LEN); snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i); - fm10k_add_stat_strings(&p, prefix, + fm10k_add_stat_strings(&data, prefix, fm10k_gstrings_queue_stats, FM10K_QUEUE_STATS_LEN); } @@ -198,18 +197,16 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - char *p = (char *)data; - switch (stringset) { case ETH_SS_TEST: - memcpy(data, *fm10k_gstrings_test, + memcpy(data, fm10k_gstrings_test, FM10K_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: fm10k_get_stat_strings(dev, data); break; case ETH_SS_PRIV_FLAGS: - memcpy(p, fm10k_prv_flags, + memcpy(data, fm10k_prv_flags, FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN); break; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5de937852436..5bb233a9614c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.21.2-k" +#define DRV_VERSION "0.21.7-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; @@ -251,6 +251,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, /** * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_buffer: buffer containing page to add + * @size: packet size from rx_desc * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * @@ -263,12 +264,12 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, * true if the buffer can be reused by the interface. 
**/ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, + unsigned int size, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->w.length); #if (PAGE_SIZE < 8192) unsigned int truesize = FM10K_RX_BUFSZ; #else @@ -314,6 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { + unsigned int size = le16_to_cpu(rx_desc->w.length); struct fm10k_rx_buffer *rx_buffer; struct page *page; @@ -350,11 +352,11 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - FM10K_RX_BUFSZ, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { + if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */ fm10k_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -473,6 +475,8 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, fm10k_rx_checksum(rx_ring, rx_desc, skb); + FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; + FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; skb_record_rx_queue(skb, rx_ring->queue_index); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index c9dfa6564fcf..334088a101c3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -2011,9 +2011,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, * function can also be used to respond to an error as the connection * resetting would also be a means of dealing with errors. **/ -static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, - struct fm10k_mbx_info *mbx) +static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) { + s32 err = 0; const enum fm10k_mbx_state state = mbx->state; switch (state) { @@ -2026,6 +2027,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, case FM10K_STATE_OPEN: /* flush any incomplete work */ fm10k_sm_mbx_connect_reset(mbx); + err = FM10K_ERR_RESET_REQUESTED; break; case FM10K_STATE_CONNECT: /* Update remote value to match local value */ @@ -2035,6 +2037,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, } fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); + + return err; } /** @@ -2115,7 +2119,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { case 0: - fm10k_sm_mbx_process_reset(hw, mbx); + err = fm10k_sm_mbx_process_reset(hw, mbx); break; case FM10K_SM_MBX_VERSION: err = fm10k_sm_mbx_process_version_1(hw, mbx); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index bc5ef6eb3dd6..01db688cf539 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1118,8 +1118,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface) * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This * function replaces fm10k_get_stats for kernels which support it. 
*/ -static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void fm10k_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_ring *ring; @@ -1164,8 +1164,6 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, /* following stats updated by fm10k_service_task() */ stats->rx_missed_errors = netdev->stats.rx_missed_errors; - - return stats; } int fm10k_setup_tc(struct net_device *dev, u8 tc) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index b1a2f8437d59..e372a5823480 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1144,6 +1144,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) struct fm10k_hw *hw = &interface->hw; struct fm10k_mbx_info *mbx = &hw->mbx; u32 eicr; + s32 err = 0; /* unmask any set bits related to this interrupt */ eicr = fm10k_read_reg(hw, FM10K_EICR); @@ -1159,12 +1160,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) /* service mailboxes */ if (fm10k_mbx_trylock(interface)) { - mbx->ops.process(hw, mbx); + err = mbx->ops.process(hw, mbx); /* handle VFLRE events */ fm10k_iov_event(interface); fm10k_mbx_unlock(interface); } + if (err == FM10K_ERR_RESET_REQUESTED) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + /* if switch toggled state we should reset GLORTs */ if (eicr & FM10K_EICR_SWITCHNOTREADY) { /* force link down for at least 4 seconds */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 23fb319fd2a0..40ee0242a80a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -72,10 +72,6 @@ force_reset: fm10k_write_flush(hw); udelay(FM10K_RESET_TIMEOUT); - /* Reset mailbox global interrupts */ - reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT; - fm10k_write_reg(hw, FM10K_GMBX, reg); - /* Verify we made it out of reset */ reg = fm10k_read_reg(hw, FM10K_IP); if (!(reg & FM10K_IP_NOTINRESET)) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ba8d30984bee..82d8040fa418 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -134,19 +134,6 @@ /* default to trying for four seconds */ #define I40E_TRY_LINK_TIMEOUT (4 * HZ) -/** - * i40e_is_mac_710 - Return true if MAC is X710/XL710 - * @hw: ptr to the hardware info - **/ -static inline bool i40e_is_mac_710(struct i40e_hw *hw) -{ - if ((hw->mac.type == I40E_MAC_X710) || - (hw->mac.type == I40E_MAC_XL710)) - return true; - - return false; -} - /* driver state flags */ enum i40e_state_t { __I40E_TESTING, @@ -361,6 +348,8 @@ struct i40e_pf { #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) #define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) #define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) +#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54) +#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -480,6 +469,22 @@ struct i40e_mac_filter { enum i40e_filter_state state; }; +/* Wrapper structure to keep track of filters while we are preparing to send + * firmware commands. We cannot send firmware commands while holding a + * spinlock, since it might sleep. 
To avoid this, we wrap the added filters in + * a separate structure, which will track the state change and update the real + * filter while under lock. We can't simply hold the filters in a separate + * list, as this opens a window for a race condition when adding new MAC + * addresses to all VLANs, or when adding new VLANs to all MAC addresses. + */ +struct i40e_new_mac_filter { + struct hlist_node hlist; + struct i40e_mac_filter *f; + + /* Track future changes to state separately */ + enum i40e_filter_state state; +}; + struct i40e_veb { struct i40e_pf *pf; u16 idx; @@ -762,6 +767,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f); void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, @@ -804,7 +810,6 @@ int i40e_lan_add_device(struct i40e_pf *pf); int i40e_lan_del_device(struct i40e_pf *pf); void i40e_client_subtask(struct i40e_pf *pf); void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); -void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi); void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); @@ -834,9 +839,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); #ifdef I40E_FCOE -struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *storage); +void i40e_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *storage); int i40e_set_mac(struct net_device *netdev, void *p); void i40e_set_rx_mode(struct net_device *netdev); #endif @@ -853,12 +857,12 @@ int i40e_close(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); -int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid); void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); -void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); -struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, - const u8 *macaddr); -int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr); +void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid); +struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, + const u8 *macaddr); +int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); #ifdef I40E_FCOE diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b2101a51534c..451f48b7540a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -538,6 +538,8 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { __le16 
command_flags; +#define I40E_AQC_MC_MAG_EN 0x0100 +#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 7fe72abc0b4a..d570219efd9f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -174,8 +174,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) if (!vsi) return; - memset(&params, 0, sizeof(params)); - i40e_client_get_params(vsi, &params); mutex_lock(&i40e_client_instance_mutex); list_for_each_entry(cdev, &i40e_client_instances, list) { if (cdev->lan_info.pf == vsi->back) { @@ -186,6 +184,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) "Cannot locate client instance l2_param_change routine\n"); continue; } + memset(&params, 0, sizeof(params)); + i40e_client_get_params(vsi, &params); if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); @@ -201,41 +201,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) } /** - * i40e_notify_client_of_netdev_open - call the client open callback - * @vsi: the VSI with netdev opened - * - * If there is a client to this netdev, call the client with open - **/ -void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) -{ - struct i40e_client_instance *cdev; - int ret = 0; - - if (!vsi) - return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.netdev == vsi->netdev) { - if (!cdev->client || - !cdev->client->ops || !cdev->client->ops->open) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance open routine\n"); - continue; - } - if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state))) { - ret = cdev->client->ops->open(&cdev->lan_info, - cdev->client); - if (!ret) - set_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state); - } - } - } - mutex_unlock(&i40e_client_instance_mutex); -} - -/** * i40e_client_release_qvlist * @ldev: pointer to L2 context. 
* @@ -545,9 +510,10 @@ void i40e_client_subtask(struct i40e_pf *pf) continue; if (!existing) { - dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", + dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", client->name, pf->hw.pf_id, - pf->hw.bus.device, pf->hw.bus.func); + pf->hw.bus.bus_id, pf->hw.bus.device, + pf->hw.bus.func); } mutex_lock(&i40e_client_instance_mutex); @@ -596,8 +562,9 @@ int i40e_lan_add_device(struct i40e_pf *pf) ldev->pf = pf; INIT_LIST_HEAD(&ldev->list); list_add(&ldev->list, &i40e_devices); - dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n", - pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); + dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", + pf->hw.pf_id, pf->hw.bus.bus_id, + pf->hw.bus.device, pf->hw.bus.func); /* Since in some cases register may have happened before a device gets * added, we can schedule a subtask to go initiate the clients if @@ -625,9 +592,9 @@ int i40e_lan_del_device(struct i40e_pf *pf) mutex_lock(&i40e_device_mutex); list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { if (ldev->pf == pf) { - dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n", - pf->hw.pf_id, pf->hw.bus.device, - pf->hw.bus.func); + dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", + pf->hw.pf_id, pf->hw.bus.bus_id, + pf->hw.bus.device, pf->hw.bus.func); list_del(&ldev->list); kfree(ldev); ret = 0; @@ -688,13 +655,11 @@ static int i40e_client_release(struct i40e_client *client) * i40e_client_prepare - prepare client specific resources * @client: pointer to the registered client * - * Return 0 on success or < 0 on error **/ -static int i40e_client_prepare(struct i40e_client *client) +static void i40e_client_prepare(struct i40e_client *client) { struct i40e_device *ldev; struct i40e_pf *pf; - int ret = 0; mutex_lock(&i40e_device_mutex); list_for_each_entry(ldev, &i40e_devices, list) { @@ -704,7 +669,6 @@ static int i40e_client_prepare(struct i40e_client *client) i40e_service_event_schedule(pf); } mutex_unlock(&i40e_device_mutex); - return ret; } /** @@ -961,13 +925,9 @@ int i40e_register_client(struct i40e_client *client) set_bit(__I40E_CLIENT_REGISTERED, &client->state); mutex_unlock(&i40e_client_mutex); - if (i40e_client_prepare(client)) { - ret = -EIO; - goto out; - } + i40e_client_prepare(client); - pr_info("i40e: Registered client %s with return code %d\n", - client->name, ret); + pr_info("i40e: Registered client %s\n", client->name); out: return ret; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 128735975caa..ece57d6a6e23 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -300,7 +300,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len; u8 *buf = (u8 *)buffer; - u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; @@ -328,12 +327,18 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ - for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); - /* write whatever's left over without overrunning the buffer */ - if (i < len) - i40e_debug(hw, mask, "\t0x%04X %*ph\n", - i, len - i, buf + 
i); + if (hw->debug_mask & mask) { + char prefix[20]; + + snprintf(prefix, 20, + "i40e %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } } } @@ -1838,6 +1843,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; hw_link_info->link_info = resp->link_info; hw_link_info->an_info = resp->an_info; + hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | + I40E_AQ_CONFIG_FEC_RS_ENA); hw_link_info->ext_info = resp->ext_info; hw_link_info->loopback = resp->loopback; hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index f1f41f12902f..267ad2588255 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -974,7 +974,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, struct i40e_dcbx_config *r_cfg = &pf->hw.remote_dcbx_config; int i, ret; - u32 switch_id; + u16 switch_id; bw_data = kzalloc(sizeof( struct i40e_aqc_query_port_ets_config_resp), @@ -986,7 +986,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, vsi = pf->vsi[pf->lan_vsi]; switch_id = - vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK; + le16_to_cpu(vsi->info.switch_id) & + I40E_AQ_VSI_SW_ID_MASK; ret = i40e_aq_query_port_ets_config(&pf->hw, switch_id, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index cc1465aac2ef..a22e26200bcc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -803,9 +803,12 @@ static int i40e_set_settings(struct net_device *netdev, if (change || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; + config.phy_type_ext = abilities.phy_type_ext; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; /* save the requested speeds */ hw->phy.link_info.requested_speeds = config.link_speed; @@ -2072,7 +2075,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector; u16 vector, intrl; - intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; @@ -2116,6 +2119,7 @@ static int __i40e_set_coalesce(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + u16 intrl_reg; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) @@ -2127,8 +2131,9 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { - netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n"); + if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n", + INTRL_REG_TO_USEC(I40E_MAX_INTRL)); return -EINVAL; } @@ -2141,7 +2146,12 @@ static int __i40e_set_coalesce(struct net_device *netdev, return 
-EINVAL; } - vsi->int_rate_limit = ec->rx_coalesce_usecs_high; + intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); + vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); + if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { + netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n", + vsi->int_rate_limit); + } if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ad4cf639430e..e8a8351c8ea9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -41,7 +41,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 25 +#define DRV_VERSION_BUILD 27 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -77,7 +77,6 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, @@ -409,15 +408,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. **/ -#ifdef I40E_FCOE -struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *stats) -#else -static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *stats) +#ifndef I40E_FCOE +static #endif +void i40e_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_ring *tx_ring, *rx_ring; @@ -426,10 +421,10 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( int i; if (test_bit(__I40E_DOWN, &vsi->state)) - return stats; + return; if (!vsi->tx_rings) - return stats; + return; rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { @@ -469,8 +464,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; - - return stats; } /** @@ -1260,7 +1253,9 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, struct hlist_head *tmp_del_list, int vlan_filters) { + s16 pvid = le16_to_cpu(vsi->info.pvid); struct i40e_mac_filter *f, *add_head; + struct i40e_new_mac_filter *new; struct hlist_node *h; int bkt, new_vlan; @@ -1279,13 +1274,13 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, */ /* Update the filters about to be added in place */ - hlist_for_each_entry(f, tmp_add_list, hlist) { - if (vsi->info.pvid && f->vlan != vsi->info.pvid) - f->vlan = vsi->info.pvid; - else if (vlan_filters && f->vlan == I40E_VLAN_ANY) - f->vlan = 0; - else if (!vlan_filters && f->vlan == 0) - f->vlan = I40E_VLAN_ANY; + hlist_for_each_entry(new, tmp_add_list, hlist) { + if (pvid && new->f->vlan != pvid) + new->f->vlan = pvid; + else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) + new->f->vlan = 0; + else if (!vlan_filters && new->f->vlan == 0) + new->f->vlan 
= I40E_VLAN_ANY; } /* Update the remaining active filters */ @@ -1295,12 +1290,12 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, * order to avoid duplicating code for adding the new filter * then deleting the old filter. */ - if ((vsi->info.pvid && f->vlan != vsi->info.pvid) || + if ((pvid && f->vlan != pvid) || (vlan_filters && f->vlan == I40E_VLAN_ANY) || (!vlan_filters && f->vlan == 0)) { /* Determine the new vlan we will be adding */ - if (vsi->info.pvid) - new_vlan = vsi->info.pvid; + if (pvid) + new_vlan = pvid; else if (vlan_filters) new_vlan = 0; else @@ -1311,9 +1306,16 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, if (!add_head) return -ENOMEM; - /* Put the replacement filter into the add list */ - hash_del(&add_head->hlist); - hlist_add_head(&add_head->hlist, tmp_add_list); + /* Create a temporary i40e_new_mac_filter */ + new = kzalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + return -ENOMEM; + + new->f = add_head; + new->state = add_head->state; + + /* Add the new filter to the tmp list */ + hlist_add_head(&new->hlist, tmp_add_list); /* Put the original filter into the delete list */ f->state = I40E_FILTER_REMOVE; @@ -1440,23 +1442,25 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry(). **/ -static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) { if (!f) return; + /* If the filter was never added to firmware then we can just delete it + * directly and we don't want to set the status to remove or else an + * admin queue command will unnecessarily fire. + */ if ((f->state == I40E_FILTER_FAILED) || (f->state == I40E_FILTER_NEW)) { - /* this one never got added by the FW. Just remove it, - * no need to sync anything. - */ hash_del(&f->hlist); kfree(f); } else { f->state = I40E_FILTER_REMOVE; - vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } + + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } /** @@ -1483,18 +1487,19 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) } /** - * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans + * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered * - * Goes through all the macvlan filters and adds a macvlan filter for each + * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise, + * go through all the macvlan filters and add a macvlan filter for each * unique vlan that already exists. If a PVID has been assigned, instead only * add the macaddr to that VLAN. 
* * Returns last filter added on success, else NULL **/ -struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, - const u8 *macaddr) +struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, + const u8 *macaddr) { struct i40e_mac_filter *f, *add = NULL; struct hlist_node *h; @@ -1504,6 +1509,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, return i40e_add_filter(vsi, macaddr, le16_to_cpu(vsi->info.pvid)); + if (!i40e_is_vsi_in_vlan(vsi)) + return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) continue; @@ -1516,15 +1524,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, } /** - * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS + * i40e_del_mac_filter - Remove a MAC filter from all VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be removed * - * Removes a given MAC address from a VSI, regardless of VLAN + * Removes a given MAC address from a VSI regardless of what VLAN it has been + * associated with. * * Returns 0 for success, or error **/ -int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr) +int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f; struct hlist_node *h; @@ -1585,8 +1594,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_del_mac_all_vlan(vsi, netdev->dev_addr); - i40e_put_mac_in_vlan(vsi, addr->sa_data); + i40e_del_mac_filter(vsi, netdev->dev_addr); + i40e_add_mac_filter(vsi, addr->sa_data); spin_unlock_bh(&vsi->mac_filter_hash_lock); ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { @@ -1762,14 +1771,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_mac_filter *f; - - if (i40e_is_vsi_in_vlan(vsi)) - f = i40e_put_mac_in_vlan(vsi, addr); - else - f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY); - if (f) + if (i40e_add_mac_filter(vsi, addr)) return 0; else return -ENOMEM; @@ -1788,10 +1791,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - if (i40e_is_vsi_in_vlan(vsi)) - i40e_del_mac_all_vlan(vsi, addr); - else - i40e_del_filter(vsi, addr, I40E_VLAN_ANY); + i40e_del_mac_filter(vsi, addr); return 0; } @@ -1829,16 +1829,15 @@ static void i40e_set_rx_mode(struct net_device *netdev) } /** - * i40e_undo_filter_entries - Undo the changes made to MAC filter entries + * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * @vsi: Pointer to VSI struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. * - * MAC filter entries from list were slated to be sent to firmware, either for - * addition or deletion. + * MAC filter entries from this list were slated for deletion. 
**/ -static void i40e_undo_filter_entries(struct i40e_vsi *vsi, - struct hlist_head *from) +static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, + struct hlist_head *from) { struct i40e_mac_filter *f; struct hlist_node *h; @@ -1853,6 +1852,53 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi, } /** + * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries + * @vsi: Pointer to vsi struct + * @from: Pointer to list which contains MAC filter entries - changes to + * those entries needs to be undone. + * + * MAC filter entries from this list were slated for addition. + **/ +static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, + struct hlist_head *from) +{ + struct i40e_new_mac_filter *new; + struct hlist_node *h; + + hlist_for_each_entry_safe(new, h, from, hlist) { + /* We can simply free the wrapper structure */ + hlist_del(&new->hlist); + kfree(new); + } +} + +/** + * i40e_next_entry - Get the next non-broadcast filter from a list + * @next: pointer to filter in list + * + * Returns the next non-broadcast filter in the list. Required so that we + * ignore broadcast filters within the list, since these are not handled via + * the normal firmware update path. + */ +static +struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next) +{ + while (next) { + next = hlist_entry(next->hlist.next, + typeof(struct i40e_new_mac_filter), + hlist); + + /* keep going if we found a broadcast filter */ + if (next && is_broadcast_ether_addr(next->f->macaddr)) + continue; + + break; + } + + return next; +} + +/** * i40e_update_filter_state - Update filter state based on return data * from firmware * @count: Number of filters added @@ -1865,7 +1911,7 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi, static int i40e_update_filter_state(int count, struct i40e_aqc_add_macvlan_element_data *add_list, - struct i40e_mac_filter *add_head) + struct i40e_new_mac_filter *add_head) { int retval = 0; int i; @@ -1884,9 +1930,9 @@ i40e_update_filter_state(int count, retval++; } - add_head = hlist_entry(add_head->hlist.next, - typeof(struct i40e_mac_filter), - hlist); + add_head = i40e_next_filter(add_head); + if (!add_head) + break; } return retval; @@ -1943,7 +1989,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, static void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_aqc_add_macvlan_element_data *list, - struct i40e_mac_filter *add_head, + struct i40e_new_mac_filter *add_head, int num_add, bool *promisc_changed) { struct i40e_hw *hw = &vsi->back->hw; @@ -1971,10 +2017,12 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, * This function sets or clears the promiscuous broadcast flags for VLAN * filters in order to properly receive broadcast frames. Assumes that only * broadcast filters are passed. 
+ * + * Returns status indicating success or failure; **/ -static -void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, - struct i40e_mac_filter *f) +static i40e_status +i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, + struct i40e_mac_filter *f) { bool enable = f->state == I40E_FILTER_NEW; struct i40e_hw *hw = &vsi->back->hw; @@ -1993,15 +2041,13 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, NULL); } - if (aq_ret) { + if (aq_ret) dev_warn(&vsi->back->pdev->dev, "Error %s setting broadcast promiscuous mode on %s\n", i40e_aq_str(hw, hw->aq.asq_last_status), vsi_name); - f->state = I40E_FILTER_FAILED; - } else if (enable) { - f->state = I40E_FILTER_ACTIVE; - } + + return aq_ret; } /** @@ -2015,7 +2061,8 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct hlist_head tmp_add_list, tmp_del_list; - struct i40e_mac_filter *f, *add_head = NULL; + struct i40e_mac_filter *f; + struct i40e_new_mac_filter *new, *add_head = NULL; struct i40e_hw *hw = &vsi->back->hw; unsigned int failed_filters = 0; unsigned int vlan_filters = 0; @@ -2069,8 +2116,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) continue; } if (f->state == I40E_FILTER_NEW) { - hash_del(&f->hlist); - hlist_add_head(&f->hlist, &tmp_add_list); + /* Create a temporary i40e_new_mac_filter */ + new = kzalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + goto err_no_memory_locked; + + /* Store pointer to the real filter */ + new->f = f; + new->state = f->state; + + /* Add it to the hash list */ + hlist_add_head(&new->hlist, &tmp_add_list); } /* Count the number of active (current and new) VLAN @@ -2105,7 +2161,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cmd_flags = 0; /* handle broadcast filters by updating the broadcast - * promiscuous flag instead of deleting a MAC filter. + * promiscuous flag and release filter list. */ if (is_broadcast_ether_addr(f->macaddr)) { i40e_aqc_broadcast_filter(vsi, vsi_name, f); @@ -2163,36 +2219,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) goto err_no_memory; num_add = 0; - hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) { + hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { - f->state = I40E_FILTER_FAILED; + new->state = I40E_FILTER_FAILED; continue; } /* handle broadcast filters by updating the broadcast * promiscuous flag instead of adding a MAC filter. 
*/ - if (is_broadcast_ether_addr(f->macaddr)) { - u64 key = i40e_addr_to_hkey(f->macaddr); - i40e_aqc_broadcast_filter(vsi, vsi_name, f); - - hlist_del(&f->hlist); - hash_add(vsi->mac_filter_hash, &f->hlist, key); + if (is_broadcast_ether_addr(new->f->macaddr)) { + if (i40e_aqc_broadcast_filter(vsi, vsi_name, + new->f)) + new->state = I40E_FILTER_FAILED; + else + new->state = I40E_FILTER_ACTIVE; continue; } /* add to add array */ if (num_add == 0) - add_head = f; + add_head = new; cmd_flags = 0; - ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); - if (f->vlan == I40E_VLAN_ANY) { + ether_addr_copy(add_list[num_add].mac_addr, + new->f->macaddr); + if (new->f->vlan == I40E_VLAN_ANY) { add_list[num_add].vlan_tag = 0; cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { add_list[num_add].vlan_tag = - cpu_to_le16((u16)(f->vlan)); + cpu_to_le16((u16)(new->f->vlan)); } add_list[num_add].queue_number = 0; /* set invalid match method for later detection */ @@ -2218,11 +2275,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) * the VSI's list. */ spin_lock_bh(&vsi->mac_filter_hash_lock); - hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) { - u64 key = i40e_addr_to_hkey(f->macaddr); - - hlist_del(&f->hlist); - hash_add(vsi->mac_filter_hash, &f->hlist, key); + hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { + /* Only update the state if we're still NEW */ + if (new->f->state == I40E_FILTER_NEW) + new->f->state = new->state; + hlist_del(&new->hlist); + kfree(new); } spin_unlock_bh(&vsi->mac_filter_hash_lock); kfree(add_list); @@ -2383,8 +2441,8 @@ err_no_memory: /* Restore elements on the temporary add and delete lists */ spin_lock_bh(&vsi->mac_filter_hash_lock); err_no_memory_locked: - i40e_undo_filter_entries(vsi, &tmp_del_list); - i40e_undo_filter_entries(vsi, &tmp_add_list); + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi, &tmp_add_list); spin_unlock_bh(&vsi->mac_filter_hash_lock); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; @@ -2574,12 +2632,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) /** * i40e_vsi_add_vlan - Add VSI membership for given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be added (0 = untagged only , -1 = any) + * @vid: VLAN id to be added **/ -int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; + if (!vid || vsi->info.pvid) + return -EINVAL; + /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_hash_lock); err = i40e_add_vlan_all_mac(vsi, vid); @@ -2622,10 +2683,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) /** * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be removed (0 = untagged only , -1 = any) + * @vid: VLAN id to be removed **/ -void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) +void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) { + if (!vid || vsi->info.pvid) + return; + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_rm_vlan_all_mac(vsi, vid); spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -3272,7 +3336,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); wr32(hw, I40E_PFINT_RATEN(vector - 1), - INTRL_USEC_TO_REG(vsi->int_rate_limit)); + i40e_intrl_usec_to_reg(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); @@ -4621,8 
+4685,10 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) */ if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { + local_bh_disable(); if (napi_reschedule(&tx_ring->q_vector->napi)) tx_ring->tx_stats.tx_lost_interrupt++; + local_bh_enable(); } } @@ -5276,6 +5342,8 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) enum i40e_aq_link_speed new_speed; char *speed = "Unknown"; char *fc = "Unknown"; + char *fec = ""; + char *an = ""; new_speed = vsi->back->hw.phy.link_info.link_speed; @@ -5335,8 +5403,23 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) break; } - netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", - speed, fc); + if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { + fec = ", FEC: None"; + an = ", Autoneg: False"; + + if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) + an = ", Autoneg: True"; + + if (vsi->back->hw.phy.link_info.fec_info & + I40E_AQ_CONFIG_FEC_KR_ENA) + fec = ", FEC: CL74 FC-FEC/BASE-R"; + else if (vsi->back->hw.phy.link_info.fec_info & + I40E_AQ_CONFIG_FEC_RS_ENA) + fec = ", FEC: CL108 RS-FEC"; + } + + netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n", + speed, fec, an, fc); } /** @@ -6271,7 +6354,16 @@ static void i40e_link_event(struct i40e_pf *pf) old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); status = i40e_get_link_status(&pf->hw, &new_link); - if (status) { + + /* On success, disable temp link polling */ + if (status == I40E_SUCCESS) { + if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING) + pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING; + } else { + /* Enable link polling temporarily until i40e_get_link_status + * returns I40E_SUCCESS + */ + pf->flags |= I40E_FLAG_TEMP_LINK_POLLING; dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", status); return; @@ -6323,7 +6415,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) + if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || + (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack @@ -8688,7 +8781,7 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4))) { pf->flags |= I40E_FLAG_RESTART_AUTONEG; @@ -8697,13 +8790,13 @@ static int i40e_sw_init(struct i40e_pf *pf) } /* Disable FW LLDP if FW < v4.3 */ - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4))) pf->flags |= I40E_FLAG_STOP_FW_LLDP; /* Use the FW Set LLDP MIB API if FW > v4.40 */ - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (pf->hw.aq.fw_maj_ver >= 5))) pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; @@ -8734,16 +8827,17 @@ static int i40e_sw_init(struct i40e_pf *pf) } #endif /* CONFIG_PCI_IOV */ if (pf->hw.mac.type == I40E_MAC_X722) { - pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | - I40E_FLAG_128_QP_RSS_CAPABLE | - I40E_FLAG_HW_ATR_EVICT_CAPABLE | - I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | - I40E_FLAG_WB_ON_ITR_CAPABLE | - 
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | - I40E_FLAG_NO_PCI_LINK_CHECK | - I40E_FLAG_USE_SET_LLDP_MIB | - I40E_FLAG_GENEVE_OFFLOAD_CAPABLE | - I40E_FLAG_PTP_L4_CAPABLE; + pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE + | I40E_FLAG_128_QP_RSS_CAPABLE + | I40E_FLAG_HW_ATR_EVICT_CAPABLE + | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE + | I40E_FLAG_WB_ON_ITR_CAPABLE + | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE + | I40E_FLAG_NO_PCI_LINK_CHECK + | I40E_FLAG_USE_SET_LLDP_MIB + | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE + | I40E_FLAG_PTP_L4_CAPABLE + | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE; } else if ((pf->hw.aq.api_maj_ver > 1) || ((pf->hw.aq.api_maj_ver == 1) && (pf->hw.aq.api_min_ver > 4))) { @@ -9345,7 +9439,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) */ i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ @@ -9354,7 +9448,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) random_ether_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } @@ -9373,7 +9467,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) */ eth_broadcast_addr(broadcast); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, broadcast); spin_unlock_bh(&vsi->mac_filter_hash_lock); ether_addr_copy(netdev->dev_addr, mac_addr); @@ -10679,7 +10773,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -10994,6 +11087,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + hw->bus.bus_id = pdev->bus->number; pf->instance = pfs_found; /* set up the locks for the AQ, do this only once in probe @@ -11659,6 +11753,53 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) } /** + * i40e_enable_mc_magic_wake - enable multicast magic packet wake up + * using the mac_address_write admin q function + * @pf: pointer to i40e_pf struct + **/ +static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + u8 mac_addr[6]; + u16 flags = 0; + + /* Get current MAC address in case it's an LAA */ + if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { + ether_addr_copy(mac_addr, + pf->vsi[pf->lan_vsi]->netdev->dev_addr); + } else { + dev_err(&pf->pdev->dev, + "Failed to retrieve MAC address; using default\n"); + ether_addr_copy(mac_addr, hw->mac.addr); + } + + /* The FW expects the mac address write cmd to first be called with + * one of these flags before calling it again with the multicast + * enable flags. 
+ */ + flags = I40E_AQC_WRITE_TYPE_LAA_WOL; + + if (hw->func_caps.flex10_enable && hw->partition_id != 1) + flags = I40E_AQC_WRITE_TYPE_LAA_ONLY; + + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up"); + return; + } + + flags = I40E_AQC_MC_MAG_EN + | I40E_AQC_WOL_PRESERVE_ON_PFR + | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG; + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) + dev_err(&pf->pdev->dev, + "Failed to enable Multicast Magic Packet wake up\n"); +} + +/** * i40e_shutdown - PCI callback for shutting down * @pdev: PCI device information struct **/ @@ -11680,6 +11821,9 @@ static void i40e_shutdown(struct pci_dev *pdev) cancel_work_sync(&pf->service_task); i40e_fdir_teardown(pf); + if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + i40e_enable_mc_magic_wake(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); @@ -11711,6 +11855,9 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); + if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + i40e_enable_mc_magic_wake(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 5b6feb7edeb1..fea81ed065db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -55,7 +55,7 @@ struct i40e_dma_mem { void *va; dma_addr_t pa; u32 size; -} __packed; +}; #define i40e_allocate_dma_mem(h, m, unused, s, a) \ i40e_allocate_dma_mem_d(h, m, s, a) @@ -64,17 +64,17 @@ struct i40e_dma_mem { struct i40e_virt_mem { void *va; u32 size; -} __packed; +}; #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) #define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) -#define i40e_debug(h, m, s, ...) \ -do { \ - if (((m) & (h)->debug_mask)) \ - pr_info("i40e %02x.%x " s, \ - (h)->bus.device, (h)->bus.func, \ - ##__VA_ARGS__); \ +#define i40e_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("i40e %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ } while (0) typedef enum i40e_status_code i40e_status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 9e49ffafce28..2caee35528fa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -280,7 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - int i; + unsigned int i, cleared = 0; /* Since we cannot turn off the Rx timestamp logic if the device is * configured for Tx timestamping, we check if Rx timestamping is @@ -306,14 +306,25 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) time_is_before_jiffies(pf->latch_events[i] + HZ)) { rd32(hw, I40E_PRTTSYN_RXTIME_H(i)); pf->latch_event_flags &= ~BIT(i); - pf->rx_hwtstamp_cleared++; - dev_warn(&pf->pdev->dev, - "Clearing a missed Rx timestamp event for RXTIME[%d]\n", - i); + cleared++; } } spin_unlock_bh(&pf->ptp_rx_lock); + + /* Log a warning if more than 2 timestamps got dropped in the same + * check. We don't want to warn about all drops because it can occur + * in normal scenarios such as PTP frames on multicast addresses we + * aren't listening to. 
However, administrator should know if this is + * the reason packets aren't receiving timestamps. + */ + if (cleared > 2) + dev_dbg(&pf->pdev->dev, + "Dropped %d missed RXTIME timestamp events\n", + cleared); + + /* Finally, update the rx_hwtstamp_cleared counter */ + pf->rx_hwtstamp_cleared += cleared; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 352cf7cd2ef4..97d46058d71d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -432,7 +432,12 @@ unsupported_flow: ret = -EINVAL; } - /* The buffer allocated here is freed by the i40e_clean_tx_ring() */ + /* The buffer allocated here will be normally be freed by + * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit + * completion. In the event of an error adding the buffer to the FDIR + * ring, it will immediately be freed. It may also be freed by + * i40e_clean_tx_ring() when closing the VSI. + */ return ret; } @@ -1013,14 +1018,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->skb) { - dev_kfree_skb(rx_bi->skb); - rx_bi->skb = NULL; - } if (!rx_bi->page) continue; @@ -1425,45 +1431,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, } /** - * i40e_pull_tail - i40e specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an i40e specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - */ -static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
- */ - pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * i40e_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being fixed @@ -1478,10 +1445,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) **/ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) { - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - i40e_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -1513,19 +1476,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, } /** - * i40e_page_is_reserved - check if reuse is possible + * i40e_page_is_reusable - check if any reuse is possible * @page: page struct to check + * + * A page is not reusable if it was allocated under low memory + * conditions, or it's not in the same NUMA node as this CPU. */ -static inline bool i40e_page_is_reserved(struct page *page) +static inline bool i40e_page_is_reusable(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + return (page_to_nid(page) == numa_mem_id()) && + !page_is_pfmemalloc(page); +} + +/** + * i40e_can_reuse_rx_page - Determine if this page can be reused by + * the adapter for another receive + * + * @rx_buffer: buffer containing the page + * @page: page address from rx_buffer + * @truesize: actual size of the buffer in this page + * + * If page is reusable, rx_buffer->page_offset is adjusted to point to + * an unused region in the page. + * + * For small pages, @truesize will be a constant value, half the size + * of the memory at page. We'll attempt to alternate between high and + * low halves of the page, with one half ready for use by the hardware + * and the other half being consumed by the stack. We use the page + * ref count to determine whether the stack has finished consuming the + * portion of this page that was passed up with a previous packet. If + * the page ref count is >1, we'll assume the "other" half page is + * still busy, and this page cannot be reused. + * + * For larger pages, @truesize will be the actual space used by the + * received packet (adjusted upward to an even multiple of the cache + * line size). This will advance through the page by the amount + * actually consumed by the received packets while there is still + * space for a buffer. Each region of larger pages will be used at + * most once, after which the page will not be reused. + * + * In either case, if the page is reusable its refcount is increased. + **/ +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, + struct page *page, + const unsigned int truesize) +{ +#if (PAGE_SIZE >= 8192) + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; +#endif + + /* Is any reuse possible? 
*/ + if (unlikely(!i40e_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Inc ref count on page before passing it up to the stack */ + get_page(page); + + return true; } /** * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware + * @size: packet length from rx_desc * @skb: sk_buff to place the data into * * This function will add the data contained in rx_buffer->page to the skb. @@ -1538,30 +1567,29 @@ static inline bool i40e_page_is_reserved(struct page *page) **/ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, - union i40e_rx_desc *rx_desc, + unsigned int size, struct sk_buff *skb) { struct page *page = rx_buffer->page; - u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsigned int truesize = I40E_RXBUFFER_2048; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; #endif + unsigned int pull_len; + + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; /* will the data fit in the skb we allocated? if so, just * copy it as it is pretty small anyway */ - if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - + if (size <= I40E_RX_HDR_SIZE) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!i40e_page_is_reserved(page))) + /* page is reusable, we can reuse buffer as-is */ + if (likely(i40e_page_is_reusable(page))) return true; /* this page cannot be reused so discard it */ @@ -1569,34 +1597,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, return false; } - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); - - /* avoid re-using remote pages */ - if (unlikely(i40e_page_is_reserved(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; + /* we need the header to contain the greater of either + * ETH_HLEN or 60 bytes if the skb->len is less than + * 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; + /* align pull length to size of long to optimize + * memcpy performance + */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); - if (rx_buffer->page_offset > last_offset) - return false; -#endif + /* update all of the pointers */ + va += pull_len; + size -= pull_len; - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. 
- */ - get_page(rx_buffer->page); +add_tail_frag: + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (unsigned long)va & ~PAGE_MASK, size, truesize); - return true; + return i40e_can_reuse_rx_page(rx_buffer, page, truesize); } /** @@ -1611,18 +1631,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, */ static inline struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc) + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) { + u64 local_status_error_len = + le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = + (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; struct i40e_rx_buffer *rx_buffer; - struct sk_buff *skb; struct page *page; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; page = rx_buffer->page; prefetchw(page); - skb = rx_buffer->skb; - if (likely(!skb)) { void *page_addr = page_address(page) + rx_buffer->page_offset; @@ -1646,19 +1669,17 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, * it now to avoid a possible cache miss */ prefetchw(skb->data); - } else { - rx_buffer->skb = NULL; } /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - I40E_RXBUFFER_2048, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; @@ -1700,7 +1721,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, #define staterrlen rx_desc->wb.qword1.status_error_len if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) { i40e_clean_programming_status(rx_ring, rx_desc); - rx_ring->rx_bi[ntc].skb = skb; return true; } /* if we are the last buffer then there is nothing else to do */ @@ -1708,8 +1728,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) return false; - /* place skb in next buffer to be received */ - rx_ring->rx_bi[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; return true; @@ -1730,12 +1748,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < budget)) { union i40e_rx_desc *rx_desc; - struct sk_buff *skb; u16 vlan_tag; u8 rx_ptype; u64 qword; @@ -1764,7 +1782,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - skb = i40e_fetch_rx_buffer(rx_ring, rx_desc); + skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb); if (!skb) break; @@ -1783,8 +1801,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) continue; } - if (i40e_cleanup_headers(rx_ring, skb)) + if (i40e_cleanup_headers(rx_ring, skb)) { + skb = NULL; continue; + } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1809,11 +1829,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; i40e_receive_skb(rx_ring, skb, vlan_tag); + skb = NULL; /* update budget accounting */ total_rx_packets++; } + rx_ring->skb = skb; + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets 
+= total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1841,14 +1864,14 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) /* a small macro to shorten up some long lines */ #define INTREG I40E_PFINT_DYN_CTLN -static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) { - return !!(vsi->rx_rings[idx]->rx_itr_setting); + return vsi->rx_rings[idx]->rx_itr_setting; } -static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) { - return !!(vsi->tx_rings[idx]->tx_itr_setting); + return vsi->tx_rings[idx]->tx_itr_setting; } /** @@ -1874,8 +1897,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - rx_itr_setting = get_rx_itr_enabled(vsi, idx); - tx_itr_setting = get_tx_itr_enabled(vsi, idx); + rx_itr_setting = get_rx_itr(vsi, idx); + tx_itr_setting = get_tx_itr(vsi, idx); if (q_vector->itr_countdown > 0 || (!ITR_IS_DYNAMIC(rx_itr_setting) && @@ -2251,14 +2274,16 @@ out: /** * i40e_tso - set up the tso context descriptor - * @skb: ptr to the skb we're sending + * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss) { + struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; union { struct iphdr *v4; @@ -2271,6 +2296,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) unsigned char *hdr; } l4; u32 paylen, l4_offset; + u16 gso_segs, gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -2309,7 +2335,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.udp->check, htonl(paylen)); + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ @@ -2330,15 +2357,23 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* update GSO size and bytecount with header size */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; - cd_mss = skb_shinfo(skb)->gso_size; + cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); @@ -2699,7 +2734,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - u16 gso_segs; u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { @@ -2708,15 +2742,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, 
I40E_TX_FLAGS_VLAN_SHIFT; } - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) - gso_segs = skb_shinfo(skb)->gso_segs; - else - gso_segs = 1; - - /* multiply data chunks by size of headers */ - first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); - first->gso_segs = gso_segs; - first->skb = skb; first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -2902,8 +2927,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) - goto out_drop; + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2919,6 +2946,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; @@ -2926,16 +2959,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) tx_flags |= I40E_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2973,7 +3003,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - dev_kfree_skb_any(skb); + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index e065321ce8ed..f80979025c01 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -52,7 +52,20 @@ */ #define INTRL_ENA BIT(6) #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) -#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) +/** + * i40e_intrl_usec_to_reg - convert interrupt rate limit to register + * @intrl: interrupt rate limit to convert + * + * This function converts a decimal interrupt rate limit to the appropriate + * register format expected by the firmware when setting interrupt rate limit. + */ +static inline u16 i40e_intrl_usec_to_reg(int intrl) +{ + if (intrl >> 2) + return ((intrl >> 2) | INTRL_ENA); + else + return 0; +} #define I40E_INTRL_8K 125 /* 8000 ints/sec */ #define I40E_INTRL_62K 16 /* 62500 ints/sec */ #define I40E_INTRL_83K 12 /* 83333 ints/sec */ @@ -240,7 +253,6 @@ struct i40e_tx_buffer { }; struct i40e_rx_buffer { - struct sk_buff *skb; dma_addr_t dma; struct page *page; unsigned int page_offset; @@ -341,6 +353,14 @@ struct i40e_ring { struct rcu_head rcu; /* to avoid race on free */ u16 next_to_alloc; + struct sk_buff *skb; /* When i40e_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * i40e_clean_rx_ring_irq() is called + * for this ring. 
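The comment above covers the heart of this Rx rework: the in-progress skb moves from the per-buffer rx_bi array to a single per-ring field, so a packet whose EOP descriptor arrives after the NAPI budget runs out can be resumed on the next poll. A minimal sketch of that carry pattern, reusing the helper names from the hunks above (descriptor validity checks and VLAN handling are simplified for illustration):

/* Sketch: carry a partially built skb across i40e_clean_rx_irq() calls.
 * The skb pointer is cleared whenever a complete frame is handed to the
 * stack, and whatever is left unfinished is parked back on the ring.
 */
static int example_clean_rx(struct i40e_ring *rx_ring, int budget)
{
	struct sk_buff *skb = rx_ring->skb;	/* resume an unfinished packet */
	int packets = 0;

	while (packets < budget) {
		union i40e_rx_desc *rx_desc =
			I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
		if (!skb)
			break;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;	/* more fragments follow; keep building */

		i40e_receive_skb(rx_ring, skb, 0);
		skb = NULL;		/* frame delivered, start fresh */
		packets++;
	}

	rx_ring->skb = skb;	/* park whatever is still incomplete */
	return packets;
}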
+ */ } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index edc0abdf4783..939f9fdc8f85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -125,7 +125,6 @@ enum i40e_debug_mask { */ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, - I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, @@ -185,6 +184,7 @@ struct i40e_link_status { enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; + u8 fec_info; u8 ext_info; u8 loopback; /* is Link Status Event notification to SW enabled */ @@ -470,6 +470,7 @@ struct i40e_bus_info { u16 func; u16 device; u16 lan_id; + u16 bus_id; }; /* Flow control (FC) parameters */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a6198b727e24..78460c52b7c4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) spin_lock_bh(&vsi->mac_filter_hash_lock); if (is_valid_ether_addr(vf->default_lan_addr.addr)) { - f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? - vf->port_vlan_id : -1); + f = i40e_add_mac_filter(vsi, + vf->default_lan_addr.addr); if (!f) dev_info(&pf->pdev->dev, "Could not add MAC filter %pM for VF %d\n", vf->default_lan_addr.addr, vf->vf_id); } eth_broadcast_addr(broadcast); - f = i40e_add_filter(vsi, broadcast, - vf->port_vlan_id ? vf->port_vlan_id : -1); + f = i40e_add_mac_filter(vsi, broadcast); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); @@ -849,9 +847,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) wr32(hw, reg_idx, reg); i40e_flush(hw); } - /* reset some of the state varibles keeping - * track of the resources - */ + /* reset some of the state variables keeping track of the resources */ vf->num_queue_pairs = 0; vf->vf_states = 0; clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); @@ -1942,12 +1938,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_mac_filter *f; f = i40e_find_mac(vsi, al->list[i].addr); - if (!f) { - if (i40e_is_vsi_in_vlan(vsi)) - f = i40e_put_mac_in_vlan(vsi, al->list[i].addr); - else - f = i40e_add_filter(vsi, al->list[i].addr, -1); - } + if (!f) + f = i40e_add_mac_filter(vsi, al->list[i].addr); if (!f) { dev_err(&pf->pdev->dev, @@ -2012,7 +2004,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) - if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) { + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = I40E_ERR_INVALID_MAC_ADDR; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2722,14 +2714,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* delete the temporary mac address */ if (!is_zero_ether_addr(vf->default_lan_addr.addr)) - i40e_del_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1); + i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); /* Delete all the filters for this VSI - we're going to kill it * anyway. 
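The VF management paths in this hunk now operate on MAC-level filters (i40e_add_mac_filter(), i40e_del_mac_filter(), __i40e_del_filter()) instead of per-(MAC, VLAN) pairs. A hedged sketch of the teardown the comment above describes, assuming __i40e_del_filter() only unhooks the single entry it is given so the plain hash walk stays valid, and that the caller holds mac_filter_hash_lock as in the diff:

/* Sketch: drop every MAC filter on a VSI that is about to be removed. */
static void example_flush_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
		__i40e_del_filter(vsi, f);
}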
*/ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) - i40e_del_filter(vsi, f->macaddr, f->vlan); + __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index aa63b7fb993d..89dfdbca13db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: - case I40E_DEV_ID_X722_VF_HV: hw->mac.type = I40E_MAC_X722_VF; break; case I40E_DEV_ID_VF: @@ -305,7 +304,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u8 *buf = (u8 *)buffer; - u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; @@ -333,12 +331,18 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ - for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); - /* write whatever's left over without overrunning the buffer */ - if (i < len) - i40e_debug(hw, mask, "\t0x%04X %*ph\n", - i, len - i, buf + i); + if (hw->debug_mask & mask) { + char prefix[20]; + + snprintf(prefix, 20, + "i40evf %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } } } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index 21dcaee1ad1d..d76393c95056 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -48,7 +48,6 @@ #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 #define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index df67ef37b7f3..c91fcf43ccbc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -501,14 +501,15 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->skb) { - dev_kfree_skb(rx_bi->skb); - rx_bi->skb = NULL; - } if (!rx_bi->page) continue; @@ -903,45 +904,6 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, } /** - * i40e_pull_tail - i40e specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an i40e specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. 
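Earlier in this hunk, i40evf_debug_aq() stops hand-rolling its hex dump and hands the buffer to print_hex_dump() with a per-device prefix built from the newly tracked bus_id. A minimal sketch of that pattern on its own (the function name is illustrative):

/* Sketch: dump an AQ buffer 16 bytes per row, prefixed with the PCI
 * bus/device/function so interleaved output from several VFs stays
 * readable. */
static void example_dump_aq_buf(struct i40e_hw *hw, const void *buf, u16 len)
{
	char prefix[20];

	snprintf(prefix, sizeof(prefix), "i40evf %02x:%02x.%x: \t0x",
		 hw->bus.bus_id, hw->bus.device, hw->bus.func);

	print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
}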
- */ -static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * i40e_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being fixed @@ -956,10 +918,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) **/ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) { - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - i40e_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -991,19 +949,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, } /** - * i40e_page_is_reserved - check if reuse is possible + * i40e_page_is_reusable - check if any reuse is possible * @page: page struct to check + * + * A page is not reusable if it was allocated under low memory + * conditions, or it's not in the same NUMA node as this CPU. */ -static inline bool i40e_page_is_reserved(struct page *page) +static inline bool i40e_page_is_reusable(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + return (page_to_nid(page) == numa_mem_id()) && + !page_is_pfmemalloc(page); +} + +/** + * i40e_can_reuse_rx_page - Determine if this page can be reused by + * the adapter for another receive + * + * @rx_buffer: buffer containing the page + * @page: page address from rx_buffer + * @truesize: actual size of the buffer in this page + * + * If page is reusable, rx_buffer->page_offset is adjusted to point to + * an unused region in the page. + * + * For small pages, @truesize will be a constant value, half the size + * of the memory at page. We'll attempt to alternate between high and + * low halves of the page, with one half ready for use by the hardware + * and the other half being consumed by the stack. We use the page + * ref count to determine whether the stack has finished consuming the + * portion of this page that was passed up with a previous packet. If + * the page ref count is >1, we'll assume the "other" half page is + * still busy, and this page cannot be reused. + * + * For larger pages, @truesize will be the actual space used by the + * received packet (adjusted upward to an even multiple of the cache + * line size). This will advance through the page by the amount + * actually consumed by the received packets while there is still + * space for a buffer. Each region of larger pages will be used at + * most once, after which the page will not be reused. + * + * In either case, if the page is reusable its refcount is increased. 
+ **/ +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, + struct page *page, + const unsigned int truesize) +{ +#if (PAGE_SIZE >= 8192) + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; +#endif + + /* Is any reuse possible? */ + if (unlikely(!i40e_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Inc ref count on page before passing it up to the stack */ + get_page(page); + + return true; } /** * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware + * @size: packet length from rx_desc * @skb: sk_buff to place the data into * * This function will add the data contained in rx_buffer->page to the skb. @@ -1016,30 +1040,29 @@ static inline bool i40e_page_is_reserved(struct page *page) **/ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, - union i40e_rx_desc *rx_desc, + unsigned int size, struct sk_buff *skb) { struct page *page = rx_buffer->page; - u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsigned int truesize = I40E_RXBUFFER_2048; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; #endif + unsigned int pull_len; + + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; /* will the data fit in the skb we allocated? if so, just * copy it as it is pretty small anyway */ - if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - + if (size <= I40E_RX_HDR_SIZE) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!i40e_page_is_reserved(page))) + /* page is reusable, we can reuse buffer as-is */ + if (likely(i40e_page_is_reusable(page))) return true; /* this page cannot be reused so discard it */ @@ -1047,34 +1070,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, return false; } - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); - - /* avoid re-using remote pages */ - if (unlikely(i40e_page_is_reserved(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; + /* we need the header to contain the greater of either + * ETH_HLEN or 60 bytes if the skb->len is less than + * 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; + /* align pull length to size of long to optimize + * memcpy performance + */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); - if (rx_buffer->page_offset > last_offset) - return false; -#endif + /* update all of the pointers */ + va += pull_len; + size -= pull_len; - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - get_page(rx_buffer->page); +add_tail_frag: + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (unsigned long)va & ~PAGE_MASK, size, truesize); - return true; + return i40e_can_reuse_rx_page(rx_buffer, page, truesize); } /** @@ -1089,18 +1104,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, */ static inline struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc) + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) { + u64 local_status_error_len = + le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = + (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; struct i40e_rx_buffer *rx_buffer; - struct sk_buff *skb; struct page *page; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; page = rx_buffer->page; prefetchw(page); - skb = rx_buffer->skb; - if (likely(!skb)) { void *page_addr = page_address(page) + rx_buffer->page_offset; @@ -1124,19 +1142,17 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, * it now to avoid a possible cache miss */ prefetchw(skb->data); - } else { - rx_buffer->skb = NULL; } /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - I40E_RXBUFFER_2048, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; @@ -1180,8 +1196,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) return false; - /* place skb in next buffer to be received */ - rx_ring->rx_bi[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; return true; @@ -1202,12 +1216,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < budget)) { union i40e_rx_desc *rx_desc; - struct sk_buff *skb; u16 vlan_tag; u8 rx_ptype; u64 qword; @@ -1236,7 +1250,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc); + skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb); if (!skb) break; @@ -1255,8 +1269,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) continue; } - if (i40e_cleanup_headers(rx_ring, skb)) + if (i40e_cleanup_headers(rx_ring, skb)) { + skb = NULL; continue; + } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1273,11 +1289,14 @@ static int i40e_clean_rx_irq(struct 
i40e_ring *rx_ring, int budget) le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; i40e_receive_skb(rx_ring, skb, vlan_tag); + skb = NULL; /* update budget accounting */ total_rx_packets++; } + rx_ring->skb = skb; + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1305,18 +1324,18 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) /* a small macro to shorten up some long lines */ #define INTREG I40E_VFINT_DYN_CTLN1 -static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) { struct i40evf_adapter *adapter = vsi->back; - return !!(adapter->rx_rings[idx].rx_itr_setting); + return adapter->rx_rings[idx].rx_itr_setting; } -static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) { struct i40evf_adapter *adapter = vsi->back; - return !!(adapter->tx_rings[idx].tx_itr_setting); + return adapter->tx_rings[idx].tx_itr_setting; } /** @@ -1342,8 +1361,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - rx_itr_setting = get_rx_itr_enabled(vsi, idx); - tx_itr_setting = get_tx_itr_enabled(vsi, idx); + rx_itr_setting = get_rx_itr(vsi, idx); + tx_itr_setting = get_tx_itr(vsi, idx); if (q_vector->itr_countdown > 0 || (!ITR_IS_DYNAMIC(rx_itr_setting) && @@ -1549,14 +1568,16 @@ out: /** * i40e_tso - set up the tso context descriptor - * @skb: ptr to the skb we're sending + * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss) { + struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; union { struct iphdr *v4; @@ -1569,6 +1590,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) unsigned char *hdr; } l4; u32 paylen, l4_offset; + u16 gso_segs, gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1607,7 +1629,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.udp->check, htonl(paylen)); + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ @@ -1628,15 +1651,23 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* update GSO size and bytecount with header size */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; - cd_mss = skb_shinfo(skb)->gso_size; + cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | 
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); @@ -1949,7 +1980,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - u16 gso_segs; u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { @@ -1958,15 +1988,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TX_FLAGS_VLAN_SHIFT; } - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) - gso_segs = skb_shinfo(skb)->gso_segs; - else - gso_segs = 1; - - /* multiply data chunks by size of headers */ - first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); - first->gso_segs = gso_segs; - first->skb = skb; first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -2151,8 +2172,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) - goto out_drop; + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2168,6 +2191,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* prepare the xmit flags */ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; @@ -2175,16 +2204,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) tx_flags |= I40E_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2211,7 +2237,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - dev_kfree_skb_any(skb); + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index a5fc789f78eb..8274ba68bd32 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -239,7 +239,6 @@ struct i40e_tx_buffer { }; struct i40e_rx_buffer { - struct sk_buff *skb; dma_addr_t dma; struct page *page; unsigned int page_offset; @@ -340,6 +339,14 @@ struct i40e_ring { struct rcu_head rcu; /* to avoid race on free */ u16 next_to_alloc; + struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * i40evf_clean_rx_ring_irq() is called + * for this ring. 
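Stepping back to the i40e_tso() changes in both the PF and VF hunks above: first->bytecount now starts as skb->len when the packet is queued, and the TSO path adds one extra copy of the header for every segment after the first. A small worked sketch of that arithmetic (the sizes are illustrative, not taken from the driver):

/* Sketch: 9000 bytes in the skb, 66-byte headers, 1448-byte MSS.
 * skb->len counts the header once; the wire carries it once per segment. */
static unsigned int example_tso_bytecount(void)
{
	unsigned int skb_len = 9000, hdr_len = 66, mss = 1448;
	unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdr_len, mss);	/* 7 */

	return skb_len + (gso_segs - 1) * hdr_len;	/* 9396 bytes on the wire */
}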
+ */ } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index c85e8a31c072..16bb88084bb9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -100,7 +100,6 @@ enum i40e_debug_mask { */ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, - I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, @@ -159,6 +158,7 @@ struct i40e_link_status { enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; + u8 fec_info; u8 ext_info; u8 loopback; /* is Link Status Event notification to SW enabled */ @@ -443,6 +443,7 @@ struct i40e_bus_info { u16 func; u16 device; u16 lan_id; + u16 bus_id; }; /* Flow control (FC) parameters */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index fc374f833aa9..d38a2b2aea2b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -81,6 +81,7 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index fffe4cf2c20b..00c42d803276 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -195,6 +195,7 @@ struct i40evf_adapter { u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; + u32 client_pending; struct msix_entry *msix_entries; u32 flags; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c0fc53361800..f35dcaac5bb7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 25 +#define DRV_VERSION_BUILD 27 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0}, /* required last entry */ {0, } }; @@ -2154,6 +2153,11 @@ static int i40evf_close(struct net_device *netdev) adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); + /* We explicitly don't free resources here because the hardware is + * still active and can DMA into memory. Resources are cleared in + * i40evf_virtchnl_completion() after we get confirmation from the PF + * driver that the rings have been stopped. 
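The i40evf_close() comment above defers the ring teardown to the virtchnl completion path; the hunks here only show the close side. A hedged sketch of what the completion side is expected to do once the PF acknowledges that the queues are disabled (the trigger and the state transition are assumptions based on the comment, not shown in this diff):

/* Hedged sketch: free the rings only after the PF has confirmed the
 * queues are stopped, so the hardware can no longer DMA into them. */
static void example_disable_queues_done(struct i40evf_adapter *adapter)
{
	i40evf_free_all_tx_resources(adapter);
	i40evf_free_all_rx_resources(adapter);

	if (adapter->state == __I40EVF_DOWN_PENDING)
		adapter->state = __I40EVF_DOWN;
}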
+ */ return 0; } @@ -2727,6 +2731,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + hw->bus.bus_id = pdev->bus->number; /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove @@ -2871,7 +2876,8 @@ static void i40evf_remove(struct pci_dev *pdev) i40evf_request_reset(adapter); msleep(50); } - + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); i40evf_misc_irq_disable(adapter); i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 2059a8e88908..bee58af390e1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -999,6 +999,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; + case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + adapter->client_pending &= + ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); + break; case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct i40e_virtchnl_rss_hena *vrh = (struct i40e_virtchnl_rss_hena *)msg; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a61447fd778e..ee443985581f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -245,6 +245,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + /* Set phy->phy_addr and phy->id. */ ret_val = igb_get_phy_id_82575(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 8aa798737d4d..07d48f2e3369 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw) ret_val = igb_pool_flash_update_done_i210(hw); if (ret_val) - hw_dbg("Flash update complete\n"); - else hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); out: return ret_val; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 5010e2232c50..5eff82678f0b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -792,15 +792,13 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) * control setting, then the variable hw->fc will * be initialized based on a value in the EEPROM. 
*/ - if (hw->mac.type == e1000_i350) { + if (hw->mac.type == e1000_i350) lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG - + lan_offset, 1, &nvm_data); - } else { - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, - 1, &nvm_data); - } + else + lan_offset = 0; + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; @@ -808,8 +806,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; else hw->fc.requested_mode = e1000_fc_full; diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 5b54254aed4f..68812d783f33 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_id; + /* ensure PHY page selection to fix misconfigured i210 */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); if (ret_val) goto out; @@ -290,7 +294,7 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) u32 i, i2ccmd = 0; u16 phy_data_swapped; - /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + /* Prevent overwriting SFP I2C EEPROM which is at A0 address.*/ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { hw_dbg("PHY I2C Address %d is out of range.\n", hw->phy.addr); diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index d84afdd83e53..58adbf234e07 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -320,7 +320,7 @@ #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ #define E1000_WUC 0x05800 /* Wakeup Control - RW */ #define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */ #define E1000_MANC 0x05820 /* Management Control - RW */ #define E1000_IPAV 0x05838 /* IP Address Valid - RW */ #define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1515abaa5ac9..be456bae8169 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter, bool set); @@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state 
trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ @@ -3275,7 +3275,9 @@ static int __igb_close(struct net_device *netdev, bool suspending) int igb_close(struct net_device *netdev) { - return __igb_close(netdev, false); + if (netif_device_present(netdev)) + return __igb_close(netdev, false); + return 0; } /** @@ -3394,7 +3396,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, tdba & 0x00000000ffffffffULL); wr32(E1000_TDBAH(reg_idx), tdba >> 32); - ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); wr32(E1000_TDH(reg_idx), 0); writel(0, ring->tail); @@ -3733,7 +3735,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ring->count * sizeof(union e1000_adv_rx_desc)); /* initialize head and tail */ - ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); wr32(E1000_RDH(reg_idx), 0); writel(0, ring->tail); @@ -5402,8 +5404,8 @@ static void igb_reset_task(struct work_struct *work) * @netdev: network interface device structure * @stats: rtnl_link_stats64 pointer **/ -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -5411,8 +5413,6 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, igb_update_stats(adapter, &adapter->stats64); memcpy(stats, &adapter->stats64, sizeof(*stats)); spin_unlock(&adapter->stats64_lock); - - return stats; } /** @@ -7564,6 +7564,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) @@ -7572,6 +7573,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, igb_ptp_suspend(adapter); igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -7690,16 +7692,15 @@ static int igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - if (netdev->flags & IFF_UP) { - rtnl_lock(); + rtnl_lock(); + if (!err && netif_running(netdev)) err = __igb_open(netdev, true); - rtnl_unlock(); - if (err) - return err; - } - netif_device_attach(netdev); - return 0; + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; } static int igb_runtime_idle(struct device *dev) @@ -7898,6 +7899,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + /* In case of PCI error, adapter lose its HW address + * so we should re-assign it here. 
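The igb_get_stats64() change above follows the tree-wide switch of the ndo_get_stats64 callback from returning a stats pointer to returning void; the core now only looks at the structure it passed in. A minimal sketch of the new shape of the callback and its hookup:

/* Sketch: .ndo_get_stats64 fills the caller-provided structure and
 * returns nothing. */
static void example_get_stats64(struct net_device *netdev,
				struct rtnl_link_stats64 *stats)
{
	/* copy the driver's accumulated counters into *stats here */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_stats64	= example_get_stats64,
};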
+ */ + hw->hw_addr = adapter->io_addr; + igb_reset(adapter); wr32(E1000_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5826b1ddedcf..fbd220d137b3 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__IXGB_DOWN, &adapter->flags)) ixgb_irq_enable(adapter); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ef81c3d8c295..b1ecc2627a5a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -55,9 +55,6 @@ #include <net/busy_poll.h> -#ifdef CONFIG_NET_RX_BUSY_POLL -#define BP_EXTENDED_STATS -#endif /* common prefix used by pr_<> macros */ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -94,6 +91,14 @@ #define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#if (PAGE_SIZE < 8192) +#define IXGBE_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) +#else +#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K +#endif + /* * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, @@ -107,6 +112,9 @@ /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + enum ixgbe_tx_flags { /* cmd_type flags */ IXGBE_TX_FLAGS_HW_VLAN = 0x01, @@ -159,6 +167,7 @@ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, IXGBEVF_XCAST_MODE_MULTI, IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, }; struct vf_macvlans { @@ -194,17 +203,17 @@ struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; }; struct ixgbe_queue_stats { u64 packets; u64 bytes; -#ifdef BP_EXTENDED_STATS - u64 yields; - u64 misses; - u64 cleaned; -#endif /* BP_EXTENDED_STATS */ }; struct ixgbe_tx_queue_stats { @@ -225,15 +234,20 @@ struct ixgbe_rx_queue_stats { #define IXGBE_TS_HDR_LEN 8 enum ixgbe_ring_state_t { + __IXGBE_RX_3K_BUFFER, + __IXGBE_RX_BUILD_SKB_ENABLED, + __IXGBE_RX_RSC_ENABLED, + __IXGBE_RX_CSUM_UDP_ZERO_ERR, + __IXGBE_RX_FCOE, __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, - __IXGBE_RX_RSC_ENABLED, - __IXGBE_RX_CSUM_UDP_ZERO_ERR, - __IXGBE_RX_FCOE, }; +#define ring_uses_build_skb(ring) \ + test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + struct ixgbe_fwd_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; @@ -343,19 +357,20 @@ struct ixgbe_ring_feature { */ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 
IXGBE_RXBUFFER_4K : - IXGBE_RXBUFFER_3K; + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return IXGBE_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return IXGBE_MAX_FRAME_BUILD_SKB; #endif return IXGBE_RXBUFFER_2K; } static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 1 : 0; +#if (PAGE_SIZE < 8192) + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return 1; #endif return 0; } @@ -398,127 +413,10 @@ struct ixgbe_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t state; -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum ixgbe_qv_state_t { - IXGBE_QV_STATE_IDLE = 0, - IXGBE_QV_STATE_NAPI, - IXGBE_QV_STATE_POLL, - IXGBE_QV_STATE_DISABLE -}; - -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from the device poll routine to get ownership of a q_vector */ -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_NAPI); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; -#endif - - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true is someone tried to get the qv while napi had it */ -static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); - - /* flush any outstanding Rx frames */ - if (q_vector->napi.gro_list) - napi_gro_flush(&q_vector->napi, false); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from ixgbe_low_latency_poll() */ -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_POLL); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->rx.ring->stats.yields++; -#endif - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true if someone tried to get the qv while it was locked */ -static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; -} - -/* false if QV is currently owned */ -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_DISABLE); - - return rc == IXGBE_QV_STATE_IDLE; -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ -} - -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline 
bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -#endif /* CONFIG_NET_RX_BUSY_POLL */ - #ifdef CONFIG_IXGBE_HWMON #define IXGBE_HWMON_TYPE_LOC 0 @@ -661,6 +559,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) +#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) +#define IXGBE_FLAG2_EEE_ENABLED BIT(15) +#define IXGBE_FLAG2_RX_LEGACY BIT(16) /* Tx fast path data */ int num_tx_queues; @@ -862,6 +763,7 @@ enum ixgbe_boards { board_X550, board_X550EM_x, board_x550em_a, + board_x550em_a_fw, }; extern const struct ixgbe_info ixgbe_82598_info; @@ -870,8 +772,9 @@ extern const struct ixgbe_info ixgbe_X540_info; extern const struct ixgbe_info ixgbe_X550_info; extern const struct ixgbe_info ixgbe_X550EM_x_info; extern const struct ixgbe_info ixgbe_x550em_a_info; +extern const struct ixgbe_info ixgbe_x550em_a_fw_info; #ifdef CONFIG_IXGBE_DCB -extern const struct dcbnl_rtnl_ops dcbnl_ops; +extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; #endif extern char ixgbe_driver_name[]; @@ -1026,6 +929,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 805ab319e578..523f9d05a810 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) case ixgbe_phy_tn: phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: phy->ops.reset = &ixgbe_reset_phy_nl; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e00aaeb91827..c8ac46049f34 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; break; default: break; @@ -1451,7 +1449,7 @@ do { \ * @atr_input: input bitstream to compute the hash on * @input_mask: mask for the input bitstream * - * This function serves two main purposes. First it applys the input_mask + * This function serves two main purposes. First it applies the input_mask * to the atr_input resulting in a cleaned up atr_input data stream. * Secondly it computes the hash and stores it in the bkt_hash field at * the end of the input byte stream. 
This way it will be available for diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 8832df3eba25..c38d50c1fcf7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -100,6 +100,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: supported = true; break; default: @@ -348,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); -#ifndef CONFIG_SPARC +#ifndef CONFIG_ARCH_WANT_RELAX_ORDER /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { u32 regval; @@ -3382,6 +3384,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, else *speed = IXGBE_LINK_SPEED_100_FULL; break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { + *speed = IXGBE_LINK_SPEED_10_FULL; + } + break; default: *speed = IXGBE_LINK_SPEED_UNKNOWN; } @@ -3578,7 +3587,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ -static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) { u32 i; u8 sum = 0; @@ -3593,43 +3602,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) } /** - * ixgbe_host_interface_command - Issue command to manageability block + * ixgbe_hic_unlocked - Issue command to manageability block unlocked * @hw: pointer to the HW structure - * @buffer: contains the command to write and where the return status will - * be placed + * @buffer: command to write and where the return status will be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. * - * Communicates with the manageability block. On success return 0 - * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + * Communicates with the manageability block. On success return 0 + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * + * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held + * by the caller. 
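The split documented above separates semaphore handling from the actual mailbox write: ixgbe_hic_unlocked() assumes the caller already owns IXGBE_GSSR_SW_MNG_SM, which lets callers batch several commands under one acquisition. A hedged sketch of the single-command case (the release path mirrors what ixgbe_host_interface_command() is expected to do; error handling is simplified):

/* Sketch: take the management semaphore, issue one command through the
 * unlocked helper, then drop the semaphore again. */
static s32 example_send_hic_command(struct ixgbe_hw *hw, u32 *cmd,
				    u32 len, u32 timeout)
{
	s32 status;

	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, cmd, len, timeout);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	return status;
}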
**/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - u32 length, u32 timeout, - bool return_data) +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, + u32 timeout) { - u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u32 hicr, i, bi, fwsts; - u16 buf_len, dword_len; - union { - struct ixgbe_hic_hdr hdr; - u32 u32arr[1]; - } *bp = buffer; - s32 status; + u32 hicr, i, fwsts; + u16 dword_len; if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } - /* Take management host interface semaphore */ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); - if (status) - return status; /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); @@ -3639,15 +3634,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs. We must be DWORD aligned */ if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); - status = IXGBE_ERR_INVALID_ARGUMENT; - goto rel_out; + return IXGBE_ERR_INVALID_ARGUMENT; } dword_len = length >> 2; @@ -3657,7 +3650,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(bp->u32arr[i])); + i, cpu_to_le32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3671,11 +3664,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* Check command successful completion. */ if ((timeout && i == timeout) || - !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { - hw_dbg(hw, "Command has failed with no status valid.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + + return 0; +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
+ **/ +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + u32 length, u32 timeout, + bool return_data) +{ + u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + union { + struct ixgbe_hic_hdr hdr; + u32 u32arr[1]; + } *bp = buffer; + u16 buf_len, dword_len; + s32 status; + u32 bi; + + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, buffer, length, timeout); + if (status) + goto rel_out; if (!return_data) goto rel_out; @@ -3722,6 +3758,8 @@ rel_out: * @min: driver version minor number * @build: driver version build number * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string * * Sends driver version number to firmware through the manageability * block. On success return 0 @@ -3729,7 +3767,8 @@ rel_out: * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. **/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub) + u8 build, u8 sub, __always_unused u16 len, + __always_unused const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; int i; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 5b3e3c65927e..e083732adf64 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -111,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 ver); + u8 build, u8 ver, u16 len, const char *str); +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index b8fc3cfec831..78c52375acc6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) return err ? 
1 : 0; } -const struct dcbnl_rtnl_ops dcbnl_ops = { +const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index fd192bf29b26..90fa5bf23d1b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { }; #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) + /* currently supported speeds for 10G */ #define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ SUPPORTED_10000baseKX4_Full | \ @@ -197,15 +204,17 @@ static int ixgbe_get_settings(struct net_device *netdev, SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? - SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; + ecmd->supported |= SUPPORTED_100baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10_FULL) + ecmd->supported |= SUPPORTED_10baseT_Full; /* default advertised speed if phy.autoneg_advertised isn't set */ ecmd->advertising = ecmd->supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { ecmd->advertising = 0; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) @@ -237,6 +246,7 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_tn: case ixgbe_phy_aq: case ixgbe_phy_x550em_ext_t: + case ixgbe_phy_fw: case ixgbe_phy_cu_unknown: ecmd->supported |= SUPPORTED_TP; ecmd->advertising |= ADVERTISED_TP; @@ -337,6 +347,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; + case IXGBE_LINK_SPEED_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_5000); + break; case IXGBE_LINK_SPEED_2_5GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_2500); break; @@ -346,6 +359,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_100_FULL: ethtool_cmd_speed_set(ecmd, SPEED_100); break; + case IXGBE_LINK_SPEED_10_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10); + break; default: break; } @@ -394,6 +410,9 @@ static int ixgbe_set_settings(struct net_device *netdev, if (ecmd->advertising & ADVERTISED_100baseT_Full) advertised |= IXGBE_LINK_SPEED_100_FULL; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + advertised |= IXGBE_LINK_SPEED_10_FULL; + if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ @@ -989,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; } static void ixgbe_get_ringparam(struct net_device *netdev, @@ -1128,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) return IXGBE_TEST_LEN; case ETH_SS_STATS: return IXGBE_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return 
IXGBE_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1170,12 +1193,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; -#endif continue; } @@ -1185,12 +1202,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; @@ -1198,12 +1209,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; -#endif continue; } @@ -1213,12 +1218,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif } for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { @@ -1255,28 +1254,12 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "tx_queue_%u_bp_napi_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "rx_queue_%u_bp_poll_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { sprintf(p, "tx_pb_%u_pxon", i); @@ -1292,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, ixgbe_priv_flags_strings, + IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); } } @@ -1896,7 +1882,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, tx_ntc = tx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); - while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { + while (rx_desc->wb.upper.length) { /* check Rx buffer */ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; @@ -1918,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, /* unmap buffer on Tx side */ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); /* increment Rx/Tx next to clean counters */ rx_ntc++; @@ -3003,8 +2998,10 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, } /* Fill out the rss hash key */ - if (key) + if (key) { 
memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + ixgbe_store_key(adapter); + } ixgbe_store_reta(adapter); @@ -3173,6 +3170,9 @@ static int ixgbe_get_module_info(struct net_device *dev, u8 sff8472_rev, addr_mode; bool page_swap = false; + if (hw->phy.type == ixgbe_phy_fw) + return -ENXIO; + /* Check whether we support SFF-8472 or not */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, @@ -3218,6 +3218,9 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, if (ee->len == 0) return -EINVAL; + if (hw->phy.type == ixgbe_phy_fw) + return -ENXIO; + for (i = ee->offset; i < ee->offset + ee->len; i++) { /* I2C reads can take long time */ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -3237,6 +3240,167 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, return 0; } +static const struct { + ixgbe_link_speed mac_speed; + u32 supported; +} ixgbe_ls_map[] = { + { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, + { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, + { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, + { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, + { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, +}; + +static const struct { + u32 lp_advertised; + u32 mac_speed; +} ixgbe_lp_map[] = { + { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, + { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, + { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, + { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, + { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, + { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, +}; + +static int +ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + struct ixgbe_hw *hw = &adapter->hw; + s32 rc; + u16 i; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); + if (rc) + return rc; + + edata->lp_advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { + if (info[0] & ixgbe_lp_map[i].lp_advertised) + edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; + } + + edata->supported = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) + edata->supported |= ixgbe_ls_map[i].supported; + } + + edata->advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) + edata->advertised |= ixgbe_ls_map[i].supported; + } + + edata->eee_enabled = !!edata->advertised; + edata->tx_lpi_enabled = edata->eee_enabled; + if (edata->advertised & edata->lp_advertised) + edata->eee_active = true; + + return 0; +} + +static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) + return ixgbe_get_eee_fw(adapter, edata); + + return -EOPNOTSUPP; +} + +static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ixgbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if 
(eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_err(drv, "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_err(drv, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_err(drv, + "Setting EEE advertised speeds is not supported\n"); + return -EINVAL; + } + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + if (edata->eee_enabled) { + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = + hw->phy.eee_speeds_supported; + } else { + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = 0; + } + + /* reset link */ + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 ixgbe_get_priv_flags(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + unsigned int flags2 = adapter->flags2; + + flags2 &= ~IXGBE_FLAG2_RX_LEGACY; + if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) + flags2 |= IXGBE_FLAG2_RX_LEGACY; + + if (flags2 != adapter->flags2) { + adapter->flags2 = flags2; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + } + + return 0; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -3269,8 +3433,12 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, + .get_eee = ixgbe_get_eee, + .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, + .get_priv_flags = ixgbe_get_priv_flags, + .set_priv_flags = ixgbe_set_priv_flags, .get_ts_info = ixgbe_get_ts_info, .get_module_info = ixgbe_get_module_info, .get_module_eeprom = ixgbe_get_module_eeprom, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 15ab337fd7ad..1b8be7d813bd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -308,6 +308,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ixgbe_cache_ring_rss(adapter); } +#define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 #define IXGBE_RSS_4Q_MASK 0x3 @@ -604,6 +605,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) **/ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) { + struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *f; u16 rss_i; @@ -612,7 +614,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - f->mask = IXGBE_RSS_16Q_MASK; + + if (hw->mac.type < ixgbe_mac_X550) + f->mask = IXGBE_RSS_16Q_MASK; + else + f->mask = IXGBE_RSS_64Q_MASK; /* disable ATR by default, it will be configured below */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -847,11 +853,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); -#ifdef 
CONFIG_NET_RX_BUSY_POLL - /* initialize busy poll */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); - -#endif /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1e2f39ebd824..a7a430a7be2c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.4.0-k" +#define DRV_VERSION "5.0.0-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2016 Intel Corporation."; @@ -86,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, [board_x550em_a] = &ixgbe_x550em_a_info, + [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -140,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, /* required last entry */ {0, } }; @@ -180,6 +183,7 @@ MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) @@ -607,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state " - "trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", + "trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, netdev->state, - dev_trans_start(netdev), - netdev->last_rx); + dev_trans_start(netdev)); } /* Print Registers */ @@ -942,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, } } -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, - struct ixgbe_tx_buffer *tx_buffer) -{ - if (tx_buffer->skb) { - dev_kfree_skb_any(tx_buffer->skb); - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_single(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - /* tx_buffer must be completely set up in the transmit path */ -} - static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1195,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, DMA_TO_DEVICE); /* clear tx_buffer data */ - tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); /* unmap remaining buffers */ @@ -1549,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, } } +static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) +{ + return 
ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0; +} + static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *bi) { @@ -1567,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); /* * if mapping failed free memory back to system since @@ -1583,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = ixgbe_rx_offset(rx_ring); + bi->pagecnt_bias = 1; return true; } @@ -1598,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; + u16 bufsz; /* nothing to do */ if (!cleaned_count) @@ -1607,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; + bufsz = ixgbe_rx_bufsz(rx_ring); + do { if (!ixgbe_alloc_mapped_page(rx_ring, bi)) break; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + /* * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. @@ -1626,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); @@ -1717,11 +1713,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { - skb_mark_napi_id(skb, &q_vector->napi); - if (ixgbe_qv_busy_polling(q_vector)) - netif_receive_skb(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -1833,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, { /* if the page was released unmap it, else just sync our portion */ if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, frag->page_offset, - ixgbe_rx_bufsz(rx_ring), + skb_frag_size(frag), DMA_FROM_DEVICE); } - IXGBE_CB(skb)->dma = 0; } /** @@ -1881,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, } /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) + if (!skb_headlen(skb)) ixgbe_pull_tail(rx_ring, skb); #ifdef IXGBE_FCOE @@ -1916,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; - /* transfer page from old buffer to new buffer */ - *new_buff = *old_buff; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static inline bool ixgbe_page_is_reserved(struct page *page) @@ -1931,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } +static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(ixgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + return false; +#else + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define IXGBE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) + if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1946,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page) * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. **/ -static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, +static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) - unsigned int truesize = ixgbe_rx_bufsz(rx_ring); + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - - ixgbe_rx_bufsz(rx_ring); + unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(size); #endif - - if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!ixgbe_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - __free_pages(page, ixgbe_rx_pg_order(rx_ring)); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); - - /* avoid re-using remote pages */ - if (unlikely(ixgbe_page_is_reserved(page))) - return false; - #if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; - - /* flip page offset to other buffer */ rx_buffer->page_offset ^= truesize; #else - /* move offset up to the next cache line */ rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) - return false; #endif - - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - page_ref_inc(page); - - return true; } -static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc) +static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) { struct ixgbe_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; - skb = rx_buffer->skb; + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + ixgbe_dma_sync_frag(rx_ring, *skb); + } - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif + return rx_buffer; +} - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - return NULL; +static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (ixgbe_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); } + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. 
Only unmap it when EOP is - * reached - */ - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - goto dma_sync; +static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + struct sk_buff *skb; - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) - ixgbe_dma_sync_frag(rx_ring, skb); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif -dma_sync: - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; - rx_buffer->skb = NULL; - } + if (size > IXGBE_RX_HDR_SIZE) { + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; - /* pull page into skb */ - if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset, + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; } - /* clear contents of buffer_info */ - rx_buffer->page = NULL; + return skb; +} + +static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb to around the page buffer */ + skb = build_skb(va - IXGBE_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, IXGBE_SKB_PAD); + __skb_put(skb, size); + + /* record DMA address if this is the start of a chain of buffers */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif return skb; } @@ -2115,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer; struct 
sk_buff *skb; + unsigned int size; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@ -2124,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); - - if (!rx_desc->wb.upper.status_error) + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) break; /* This memory barrier is needed to keep us from reading @@ -2134,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ dma_rmb(); + rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + /* retrieve a buffer from the ring */ - skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); + if (skb) + ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = ixgbe_build_skb(rx_ring, rx_buffer, + rx_desc, size); + else + skb = ixgbe_construct_skb(rx_ring, rx_buffer, + rx_desc, size); /* exit if we failed to retrieve a buffer */ - if (!skb) + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; break; + } + ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); cleaned_count++; /* place incomplete frames back on ring for completion */ @@ -2198,40 +2270,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, return total_rx_packets; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int ixgbe_low_latency_recv(struct napi_struct *napi) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int found = 0; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return LL_FLUSH_FAILED; - - if (!ixgbe_qv_lock_poll(q_vector)) - return LL_FLUSH_BUSY; - - ixgbe_for_each_ring(ring, q_vector->rx) { - found = ixgbe_clean_rx_irq(q_vector, ring, 4); -#ifdef BP_EXTENDED_STATS - if (found) - ring->stats.cleaned += found; - else - ring->stats.misses++; -#endif - if (found) - break; - } - - ixgbe_qv_unlock_poll(q_vector); - - return found; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -2447,6 +2485,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 eicr = adapter->interrupt_event; + s32 rc; if (test_bit(__IXGBE_DOWN, &adapter->state)) return; @@ -2485,6 +2524,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) return; break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + rc = hw->phy.ops.check_overtemp(hw); + if (rc != IXGBE_ERR_OVERTEMP) + return; + break; default: if (adapter->hw.mac.type >= ixgbe_mac_X540) return; @@ -2531,6 +2576,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) return; } return; + case ixgbe_mac_x550em_a: + if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + IXGBE_EICR_GPI_SDP0_X550EM_a); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, + IXGBE_EICR_GPI_SDP0_X550EM_a); + } + return; + case ixgbe_mac_X550: case ixgbe_mac_X540: if (!(eicr & IXGBE_EICR_TS)) return; @@ -2856,8 +2913,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) clean_complete = false; } - /* Exit if we are called by netpoll or busy polling is active */ - if ((budget <= 0) || 
!ixgbe_qv_lock_napi(q_vector)) + /* Exit if we are called by netpoll */ + if (budget <= 0) return budget; /* attempt to distribute budget to each queue fairly, but don't allow @@ -2876,7 +2933,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget) clean_complete = false; } - ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -3214,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct ixgbe_tx_buffer) * ring->count); + /* enable queue */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); @@ -3384,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ - srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) + srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -3411,6 +3474,21 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) } /** + * ixgbe_store_key - Write the RSS key to HW + * @adapter: device handle + * + * Write the RSS key stored in adapter.rss_key to HW. + */ +void ixgbe_store_key(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); +} + +/** * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle * @@ -3475,7 +3553,6 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; u32 i, j; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; @@ -3488,8 +3565,7 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) rss_i = 4; /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + ixgbe_store_key(adapter); /* Fill out redirection table */ memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); @@ -3685,6 +3761,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; + union ixgbe_adv_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; u8 reg_idx = ring->reg_idx; @@ -3717,8 +3794,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, */ rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; +#if (PAGE_SIZE < 8192) + } else { + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); + + /* Limit the maximum frame size so we don't overrun the skb */ + if (ring_uses_build_skb(ring) && + !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB | + IXGBE_RXDCTL_RLPML_EN; +#endif } + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct ixgbe_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IXGBE_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + /* enable receive descriptor ring */ rxdctl |= IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); @@ -3855,10 +3951,31 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) */ for 
(i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; + + clear_ring_rsc_enabled(rx_ring); + clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); - else - clear_ring_rsc_enabled(rx_ring); + + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + continue; + + set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#if (PAGE_SIZE < 8192) + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + + if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame > IXGBE_MAX_FRAME_BUILD_SKB)) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif } } @@ -4559,23 +4676,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { - ixgbe_qv_init_lock(adapter->q_vector[q_idx]); + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_enable(&adapter->q_vector[q_idx]->napi); - } } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_disable(&adapter->q_vector[q_idx]->napi); - while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { - pr_info("QV %d locked\n", q_idx); - usleep_range(1000, 20000); - } - } } static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) @@ -4879,45 +4989,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) **/ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) { - struct device *dev = rx_ring->dev; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; + u16 i = rx_ring->next_to_clean; + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - + while (i != rx_ring->next_to_alloc) { if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; if (IXGBE_CB(skb)->page_released) - dma_unmap_page(dev, - IXGBE_CB(skb)->dma, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, + IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); dev_kfree_skb(skb); - rx_buffer->skb = NULL; } - if (!rx_buffer->page) - continue; + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); - rx_buffer->page = NULL; + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } } - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -5294,6 +5406,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); + if (adapter->hw.phy.type == ixgbe_phy_fw) + ixgbe_watchdog_link_is_down(adapter); ixgbe_down(adapter); /* * If SR-IOV enabled then wait a bit before bringing the adapter @@ -5384,28 +5498,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) **/ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) { - struct ixgbe_tx_buffer *tx_buffer_info; - unsigned long size; - u16 i; + u16 i = tx_ring->next_to_clean; + struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_buffer_info) - return; + while (i != tx_ring->next_to_use) { + union ixgbe_adv_tx_desc *eop_desc, *tx_desc; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - } + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); - netdev_tx_reset_queue(txring_txq(tx_ring)); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); - size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IXGBE_TX_DESC(tx_ring, i); - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; } @@ -5554,6 +5697,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /** + * ixgbe_eee_capable - helper function to determine EEE support on X550 + * @adapter: board private structure + */ +static void ixgbe_set_eee_capable(struct 
ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + if (!hw->phy.eee_speeds_supported) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; + if (!hw->phy.eee_speeds_advertised) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + break; + default: + adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + break; + } +} + +/** * ixgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ @@ -5717,6 +5885,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, break; case ixgbe_mac_x550em_a: adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + default: + break; + } /* fall through */ case ixgbe_mac_X550EM_x: #ifdef CONFIG_IXGBE_DCB @@ -5730,6 +5906,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, #endif /* IXGBE_FCOE */ /* Fall Through */ case ixgbe_mac_X550: + if (hw->mac.type == ixgbe_mac_X550) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif @@ -5816,9 +5994,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) if (tx_ring->q_vector) ring_node = tx_ring->q_vector->numa_node; - tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); + tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vzalloc(size); + tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) goto err; @@ -5900,9 +6078,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) if (rx_ring->q_vector) ring_node = rx_ring->q_vector->numa_node; - rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); + rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vzalloc(size); + rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) goto err; @@ -6200,7 +6378,8 @@ int ixgbe_close(struct net_device *netdev) ixgbe_ptp_stop(adapter); - ixgbe_close_suspend(adapter); + if (netif_device_present(netdev)) + ixgbe_close_suspend(adapter); ixgbe_fdir_filter_exit(adapter); @@ -6245,14 +6424,12 @@ static int ixgbe_resume(struct pci_dev *pdev) if (!err && netif_running(netdev)) err = ixgbe_open(netdev); - rtnl_unlock(); - - if (err) - return err; - netif_device_attach(netdev); + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); - return 0; + return err; } #endif /* CONFIG_PM */ @@ -6267,14 +6444,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); - rtnl_lock(); if (netif_running(netdev)) ixgbe_close_suspend(adapter); - rtnl_unlock(); ixgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -6808,6 +6985,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case IXGBE_LINK_SPEED_100_FULL: speed_str = "100 Mbps"; break; + case IXGBE_LINK_SPEED_10_FULL: + speed_str = "10 Mbps"; + break; default: speed_str = "unknown speed"; break; @@ -7615,18 +7795,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, return; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ - for (;;) { 
+ while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i--) + i += tx_ring->count; tx_buffer = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); - if (tx_buffer == first) - break; - if (i == 0) - i = tx_ring->count; - i--; } + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + tx_ring->next_to_use = i; } @@ -8111,8 +8305,9 @@ static void ixgbe_netpoll(struct net_device *netdev) } #endif -static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + +static void ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; @@ -8150,13 +8345,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, } } rcu_read_unlock(); + /* following stats updated by ixgbe_watchdog_task() */ stats->multicast = netdev->stats.multicast; stats->rx_errors = netdev->stats.rx_errors; stats->rx_length_errors = netdev->stats.rx_length_errors; stats->rx_crc_errors = netdev->stats.rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - return stats; } #ifdef CONFIG_IXGBE_DCB @@ -9290,9 +9485,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ixgbe_low_latency_recv, -#endif #ifdef IXGBE_FCOE .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, @@ -9596,6 +9788,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; + ixgbe_set_eee_capable(adapter); if (err == IXGBE_ERR_SFP_NOT_PRESENT) { err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { @@ -9673,7 +9866,7 @@ skip_sriov: #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) - netdev->dcbnl_ops = &dcbnl_ops; + netdev->dcbnl_ops = &ixgbe_dcbnl_ops; #endif #ifdef IXGBE_FCOE @@ -9833,8 +10026,9 @@ skip_sriov: * since os does not support feature */ if (hw->mac.ops.set_fw_drv_ver) - hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, - 0xFF); + hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, + sizeof(ixgbe_driver_version) - 1, + ixgbe_driver_version); /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); @@ -10082,7 +10276,7 @@ skip_bad_vf_detection: } if (netif_running(netdev)) - ixgbe_down(adapter); + ixgbe_close_suspend(adapter); if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -10152,10 +10346,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev) } #endif + rtnl_lock(); if (netif_running(netdev)) - ixgbe_up(adapter); + ixgbe_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } static const struct pci_error_handlers ixgbe_err_handler = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 01c2667c0f92..811cb4f64a5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -74,6 +74,7 @@ enum 
ixgbe_pfvf_api_rev { ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 3b8362085f57..e55b2602f371 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 10; + int max_retry = 3; int retry = 0; u8 csum_byte; u8 high_bits; @@ -452,10 +452,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) */ for (i = 0; i < 30; i++) { msleep(100); - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl); - if (!(ctrl & MDIO_CTRL1_RESET)) { - udelay(2); - break; + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + MDIO_MMD_PMAPMD, &ctrl); + if (status) + return status; + + if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + udelay(2); + break; + } + } else { + status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, &ctrl); + if (status) + return status; + + if (!(ctrl & MDIO_CTRL1_RESET)) { + udelay(2); + break; + } } } @@ -751,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - - /* - * Clear autoneg_advertised and set new values based on input link + /* Clear autoneg_advertised and set new values based on input link * speed. 
*/ hw->phy.autoneg_advertised = 0; @@ -761,12 +776,21 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed & IXGBE_LINK_SPEED_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + if (speed & IXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; + /* Setup link based on the new speed settings */ hw->phy.ops.setup_link(hw); @@ -960,40 +984,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) } /** - * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - status = hw->phy.ops.read_reg(hw, AQ_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure **/ @@ -1738,6 +1728,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index ecf05f838fc5..5aa2c3cf7aec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -168,10 +168,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version); -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 1efb404431e9..ef0635e0918c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -858,14 +858,14 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - 
adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -879,8 +879,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7e5d9850e4b2..044cb44747cf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -512,6 +512,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: /* * Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are @@ -934,7 +935,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, IXGBE_VT_MSGINFO_SHIFT; int err; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + index > 0) { e_warn(drv, "VF %d requested MACVLAN filter but is administratively denied\n", vf); @@ -978,6 +980,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -1002,6 +1005,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return -1; @@ -1041,8 +1045,13 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + break; + default: return -EOPNOTSUPP; + } /* This mailbox command is supported (required) only for 82599 and x540 * VFs which support up to 4 RSS queues. 
Therefore we will compress the @@ -1068,8 +1077,13 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + break; + default: return -EOPNOTSUPP; + } memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); @@ -1081,11 +1095,16 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, { struct ixgbe_hw *hw = &adapter->hw; int xcast_mode = msgbuf[1]; - u32 vmolr, disable, enable; + u32 vmolr, fctrl, disable, enable; /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall threw */ + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -1101,17 +1120,34 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = 0; break; case IXGBEVF_XCAST_MODE_MULTI: - disable = IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; break; case IXGBEVF_XCAST_MODE_ALLMULTI: - disable = 0; + disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; break; + case IXGBEVF_XCAST_MODE_PROMISC: + if (hw->mac.type <= ixgbe_mac_82599EB) + return -EOPNOTSUPP; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + if (!(fctrl & IXGBE_FCTRL_UPE)) { + /* VF promisc requires PF in promisc */ + e_warn(drv, + "Enabling VF promisc requires PF in promisc\n"); + return -EPERM; + } + + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index cf21273db201..1d07f2ead914 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -92,6 +92,8 @@ #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED @@ -1499,6 +1501,8 @@ enum { #define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) /* VMOLR bitmasks */ +#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ +#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ #define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ #define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ @@ -1914,6 +1918,7 @@ enum { #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 #define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0 #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ @@ -2619,6 +2624,7 @@ enum ixgbe_fdir_pballoc_type { #define FW_CEM_UNUSED_VER 0x0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ #define FW_READ_SHADOW_RAM_CMD 0x31 #define FW_READ_SHADOW_RAM_LEN 0x6 #define FW_WRITE_SHADOW_RAM_CMD 0x33 @@ -2644,6 +2650,59 @@ enum ixgbe_fdir_pballoc_type { #define FW_INT_PHY_REQ_LEN 10 #define FW_INT_PHY_REQ_READ 0 #define FW_INT_PHY_REQ_WRITE 1 +#define FW_PHY_ACT_REQ_CMD 5 +#define FW_PHY_ACT_DATA_COUNT 4 +#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) +#define FW_PHY_ACT_INIT_PHY 1 +#define FW_PHY_ACT_SETUP_LINK 2 +#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) +#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) +#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) +#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) +#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) +#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) +#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) +#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) +#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) +#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) +#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 +#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ + HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u +#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) +#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) +#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) +#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) +#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) +#define FW_PHY_ACT_GET_LINK_INFO 3 +#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) +#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) +#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) +#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) +#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) +#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) +#define FW_PHY_ACT_FORCE_LINK_DOWN 4 +#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) +#define FW_PHY_ACT_PHY_SW_RESET 5 +#define FW_PHY_ACT_PHY_HW_RESET 6 +#define FW_PHY_ACT_GET_PHY_INFO 7 +#define FW_PHY_ACT_UD_2 0x1002 +#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) +#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) +#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) +#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) +#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) +#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) +#define FW_PHY_ACT_RETRIES 50 +#define FW_PHY_INFO_SPEED_MASK 0xFFFu +#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u +#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2686,6 +2745,16 @@ struct ixgbe_hic_drv_info { u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ }; +struct ixgbe_hic_drv_info2 { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; +}; + /* These need to be dword aligned */ struct ixgbe_hic_read_shadow_ram { union ixgbe_hic_hdr2 hdr; @@ -2734,6 +2803,19 @@ struct ixgbe_hic_internal_phy_resp { __be32 read_data; }; +struct ixgbe_hic_phy_activity_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad; + __le16 activity_id; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +struct ixgbe_hic_phy_activity_resp { + struct ixgbe_hic_hdr hdr; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -2849,6 +2931,7 @@ typedef u32 ixgbe_autoneg_advertised; /* Link speed */ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_10_FULL 0x0002 #define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 @@ -3064,6 +3147,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, ixgbe_phy_sgmii, + ixgbe_phy_fw, ixgbe_phy_generic }; @@ -3362,7 +3446,8 @@ struct ixgbe_mac_operations { void (*fc_autoneg)(struct ixgbe_hw *); /* Manageability interface */ - s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, + const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); @@ -3392,7 +3477,6 @@ struct ixgbe_phy_operations { s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); - s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); @@ -3478,6 +3562,8 @@ struct ixgbe_phy_info { bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; ixgbe_link_speed speeds_supported; + ixgbe_link_speed eee_speeds_supported; + ixgbe_link_speed eee_speeds_advertised; enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index e2ff823ee202..84a467a8ed3d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -780,8 +780,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) ixgbe_link_speed speed; bool link_up; - /* - * Link should be up in order for the blink bit in the LED control + if (index > 3) + return IXGBE_ERR_PARAM; + + /* Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. * This will be reversed when we stop the blinking. */ @@ -814,6 +816,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) u32 macc_reg; u32 ledctl_reg; + if (index > 3) + return IXGBE_ERR_PARAM; + /* Restore the LED to its default value. 
*/ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -913,7 +918,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = { .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, .set_phy_power = &ixgbe_set_copper_phy_power, - .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, }; static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 11fb433eb924..200f847fd8f3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -63,6 +63,18 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) return 0; } +static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + phy->ops.set_phy_power = NULL; + + return 0; +} + /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control * @hw: pointer to hardware structure **/ @@ -402,6 +414,204 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); } +/** + * ixgbe_fw_phy_activity - Perform an activity on a PHY + * @hw: pointer to hardware structure + * @activity: activity to perform + * @data: Pointer to 4 32-bit words of data + */ +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]) +{ + union { + struct ixgbe_hic_phy_activity_req cmd; + struct ixgbe_hic_phy_activity_resp rsp; + } hic; + u16 retries = FW_PHY_ACT_RETRIES; + s32 rc; + u32 i; + + do { + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; + hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.activity_id = cpu_to_le16(activity); + for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) + hic.cmd.data[i] = cpu_to_be32((*data)[i]); + + rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (rc) + return rc; + if (hic.rsp.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + (*data)[i] = be32_to_cpu(hic.rsp.data[i]); + return 0; + } + usleep_range(20, 30); + --retries; + } while (retries > 0); + + return IXGBE_ERR_HOST_INTERFACE_COMMAND; +} + +static const struct { + u16 fw_speed; + ixgbe_link_speed phy_speed; +} ixgbe_fw_map[] = { + { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, + { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, + { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, +}; + +/** + * ixgbe_get_phy_id_fw - Get the phy ID via firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + u16 phy_speeds; + u16 phy_id_lo; + s32 rc; + u16 i; + + if (hw->phy.id) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); + if (rc) + return rc; + + hw->phy.speeds_supported = 0; + phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; + for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (phy_speeds & 
ixgbe_fw_map[i].fw_speed) + hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; + } + + hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; + phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; + hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; + hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; + if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) + return IXGBE_ERR_PHY_ADDR_INVALID; + + hw->phy.autoneg_advertised = hw->phy.speeds_supported; + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + return 0; +} + +/** + * ixgbe_identify_phy_fw - Get PHY type based on firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) +{ + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + + hw->phy.type = ixgbe_phy_fw; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + return ixgbe_get_phy_id_fw(hw); +} + +/** + * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + + setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; + return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); +} + +/** + * ixgbe_setup_fw_link - Setup firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + u16 i; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_rx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_tx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + default: + break; + } + + for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) + setup[0] |= ixgbe_fw_map[i].fw_speed; + } + setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; + + if (hw->phy.eee_speeds_advertised) + setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); + if (rc) + return rc; + if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) + return IXGBE_ERR_OVERTEMP; + return 0; +} + +/** + * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. 
+ */ +static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) +{ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + return ixgbe_setup_fw_link(hw); +} + /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params * @hw: pointer to hardware structure * @@ -624,41 +834,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, return status; } -/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface - * command assuming that the semaphore is already obtained. - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the hostif. - **/ -static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - u16 *data) -{ - s32 status; - struct ixgbe_hic_read_shadow_ram buffer; - - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = cpu_to_be32(offset * 2); - /* one word */ - buffer.length = cpu_to_be16(sizeof(u16)); - - status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, false); - if (status) - return status; - - *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, - FW_NVM_DATA_OFFSET); - - return 0; -} - /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read @@ -670,6 +845,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; u32 current_word = 0; u16 words_to_read; @@ -677,7 +853,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u32 i; /* Take semaphore for the entire operation. 
*/ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) { hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); return status; @@ -698,10 +874,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); - status = ixgbe_host_interface_command(hw, &buffer, - sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, - false); + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); if (status) { hw_dbg(hw, "Host interface command failed\n"); goto out; @@ -725,7 +899,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, } out: - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + hw->mac.ops.release_swfw_sync(hw, mask); return status; } @@ -896,15 +1070,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) **/ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) { - s32 status = 0; + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + s32 status; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { - status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (!status) { + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); } + hw->mac.ops.release_swfw_sync(hw, mask); return status; } @@ -1768,6 +1959,125 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, return rc; } +/** + * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + 
IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + ixgbe_restart_an_internal_phy_x550em(hw); + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** + * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + */ +static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + */ + if (hw->fc.disable_fc_autoneg) + goto out; + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) + goto out; + + /* Check if auto-negotiation has completed */ + status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); + if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Negotiate the flow control */ + status = ixgbe_negotiate_fc(hw, info[0], info[0], + FW_PHY_ACT_GET_LINK_INFO_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_FC_TX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + +out: + if (!status) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + /** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers * @hw: pointer to hardware structure **/ @@ -1780,6 +2090,17 @@ static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) mac->ops.setup_fc = NULL; mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; break; + case ixgbe_media_type_copper: + if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && + hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) { + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + break; + } + mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; + mac->ops.setup_fc = ixgbe_fc_autoneg_fw; + mac->ops.setup_link = ixgbe_setup_sgmii_fw; + mac->ops.check_link = ixgbe_check_mac_link_generic; + break; case ixgbe_media_type_backplane: mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; @@ -1827,7 +2148,7 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; - return; + break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) @@ -1870,6 +2191,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { + if (hw->phy.type == ixgbe_phy_fw) { + *autoneg = true; + *speed = hw->phy.speeds_supported; + return 0; + } + /* SFP */ if 
(hw->phy.media_type == ixgbe_media_type_fiber) { /* CS4227 SFP must not enable auto-negotiation */ @@ -2108,8 +2435,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, return status; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC); reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); @@ -2189,12 +2514,11 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) /** * ixgbe_setup_kr_x550em - Configure the KR PHY * @hw: pointer to hardware structure - * - * Configures the integrated KR PHY for X550EM_x. **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { - if (hw->mac.type != ixgbe_mac_X550EM_x) + /* leave link alone for 2.5G */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) return 0; return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); @@ -2356,6 +2680,62 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) return 0; } +/** + * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver) +{ + struct ixgbe_hic_drv_info2 fw_cmd; + s32 ret_val; + int i; + + if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) + return IXGBE_ERR_INVALID_ARGUMENT; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + memcpy(fw_cmd.driver_string, driver_ver, len); + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status != + FW_CEM_RESP_STATUS_SUCCESS) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return 0; + } + + return ret_val; +} + /** ixgbe_get_lcd_x550em - Determine lowest common denominator * @hw: pointer to hardware structure * @lcd_speed: pointer to lowest common link speed @@ -2655,6 +3035,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) } /** + * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); + if (rc) + return rc; + memset(store, 0, sizeof(store)); + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); + if (rc) + return rc; + + return ixgbe_setup_fw_link(hw); +} + +/** + * 
ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp + * @hw: pointer to hardware structure + */ +static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); + if (rc) + return rc; + + if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { + ixgbe_shutdown_fw_phy(hw); + return IXGBE_ERR_OVERTEMP; + } + return 0; +} + +/** * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register * @hw: pointer to hardware structure * @@ -2740,6 +3164,10 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; + case ixgbe_phy_fw: + phy->ops.setup_link = ixgbe_setup_fw_link; + phy->ops.reset = ixgbe_reset_phy_fw; + break; default: break; } @@ -2777,6 +3205,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: media_type = ixgbe_media_type_copper; break; default: @@ -2844,6 +3274,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) hlreg0 &= ~IXGBE_HLREG0_MDCSPD; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + /* Select fast MDIO clock speed for these devices */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 |= IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; default: break; } @@ -3275,7 +3712,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .clear_vfta = &ixgbe_clear_vfta_generic, \ .set_vfta = &ixgbe_set_vfta_generic, \ .fc_enable = &ixgbe_fc_enable_generic, \ - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ .init_uta_tables = &ixgbe_init_uta_tables_generic, \ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ @@ -3355,6 +3792,27 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; +static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = ixgbe_get_link_capabilities_X550em, + .get_bus_info = ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, + .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, +}; + #define X550_COMMON_EEP \ .read = &ixgbe_read_ee_hostif_X550, \ .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ @@ -3384,12 +3842,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ - .set_phy_power = NULL, \ - .check_overtemp = &ixgbe_tn_check_overtemp, \ - 
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + .set_phy_power = NULL, static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = NULL, .identify = &ixgbe_identify_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, @@ -3398,6 +3855,7 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { static const struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_generic, @@ -3406,6 +3864,7 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = { static const struct ixgbe_phy_operations phy_ops_x550em_a = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_x550a, @@ -3414,6 +3873,17 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = { .write_reg_mdi = &ixgbe_write_phy_reg_mdi, }; +static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { + X550_COMMON_PHY + .check_overtemp = ixgbe_check_overtemp_fw, + .init = ixgbe_init_phy_ops_X550em, + .identify = ixgbe_identify_phy_fw, + .read_reg = NULL, + .write_reg = NULL, + .read_reg_mdi = NULL, + .write_reg_mdi = NULL, +}; + static const struct ixgbe_link_operations link_ops_x550em_x = { .read_link = &ixgbe_read_i2c_combined_generic, .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, @@ -3463,3 +3933,13 @@ const struct ixgbe_info ixgbe_x550em_a_info = { .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_x550em_a, }; + +const struct ixgbe_info ixgbe_x550em_a_fw_info = { + .mac = ixgbe_mac_x550em_a, + .get_invariants = ixgbe_get_invariants_X550_a_fw, + .mac_ops = &mac_ops_x550em_a_fw, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a_fw, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 508e72c5f1c2..1f6c0ecd50bb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -432,11 +432,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, if (!ring) { data[i++] = 0; data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif continue; } @@ -446,12 +441,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i + 1] = ring->stats.misses; - data[i + 2] = ring->stats.cleaned; - i += 3; -#endif } /* populate Rx queue data */ @@ -460,11 +449,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, if (!ring) { data[i++] = 0; data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif continue; } @@ -474,12 +458,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i + 1] = ring->stats.misses; - data[i + 2] = ring->stats.cleaned; - i += 3; -#endif } } @@ -507,28 +485,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", 
i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "tx_queue_%u_bp_napi_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < adapter->num_rx_queues; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "rx_queue_%u_bp_poll_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } break; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 5639fbe294d0..a8cbc2dda0dd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -37,11 +37,6 @@ #include "vf.h" -#ifdef CONFIG_NET_RX_BUSY_POLL -#include <net/busy_poll.h> -#define BP_EXTENDED_STATS -#endif - #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) @@ -73,11 +68,6 @@ struct ixgbevf_rx_buffer { struct ixgbevf_stats { u64 packets; u64 bytes; -#ifdef BP_EXTENDED_STATS - u64 yields; - u64 misses; - u64 cleaned; -#endif }; struct ixgbevf_tx_queue_stats { @@ -217,109 +207,6 @@ struct ixgbevf_q_vector { #endif /* CONFIG_NET_RX_BUSY_POLL */ }; -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) -{ - spin_lock_init(&q_vector->lock); - q_vector->state = IXGBEVF_QV_STATE_IDLE; -} - -/* called from the device poll routine to get ownership of a q_vector */ -static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBEVF_QV_LOCKED) { - WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); - q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; - rc = false; -#ifdef BP_EXTENDED_STATS - q_vector->tx.ring->stats.yields++; -#endif - } else { - /* we don't care if someone yielded */ - q_vector->state = IXGBEVF_QV_STATE_NAPI; - } - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* returns true is someone tried to get the qv while napi had it */ -static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) -{ - int rc = false; - - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | - IXGBEVF_QV_STATE_NAPI_YIELD)); - - if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) - rc = true; - /* reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* called from ixgbevf_low_latency_poll() */ -static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if ((q_vector->state & IXGBEVF_QV_LOCKED)) { - q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; - rc = false; -#ifdef BP_EXTENDED_STATS - q_vector->rx.ring->stats.yields++; -#endif - } else { - /* preserve yield marks */ - q_vector->state |= IXGBEVF_QV_STATE_POLL; - } - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* returns true if someone tried to get the qv while it was locked */ -static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) -{ - int rc = false; - - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); - - if 
(q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) - rc = true; - /* reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) -{ - WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); - return q_vector->state & IXGBEVF_QV_USER_PEND; -} - -/* false if QV is currently owned */ -static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBEVF_QV_OWNED) - rc = false; - q_vector->state |= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* microsecond values for various ITR rates shifted by 2 to fit itr register * with the first 3 bits reserved 0 */ @@ -464,6 +351,7 @@ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, IXGBEVF_XCAST_MODE_MULTI, IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, }; extern const struct ixgbevf_info ixgbevf_82599_vf_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 6d4bef5803f2..80bab261a0ec 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -457,16 +457,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb) { -#ifdef CONFIG_NET_RX_BUSY_POLL - skb_mark_napi_id(skb, &q_vector->napi); - - if (ixgbevf_qv_busy_polling(q_vector)) { - netif_receive_skb(skb); - /* exit early if we busy polled */ - return; - } -#endif /* CONFIG_NET_RX_BUSY_POLL */ - napi_gro_receive(&q_vector->napi, skb); } @@ -1031,10 +1021,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (budget <= 0) return budget; -#ifdef CONFIG_NET_RX_BUSY_POLL - if (!ixgbevf_qv_lock_napi(q_vector)) - return budget; -#endif /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling @@ -1052,10 +1038,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) clean_complete = false; } -#ifdef CONFIG_NET_RX_BUSY_POLL - ixgbevf_qv_unlock_napi(q_vector); -#endif - /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -1090,40 +1072,6 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int ixgbevf_busy_poll_recv(struct napi_struct *napi) -{ - struct ixgbevf_q_vector *q_vector = - container_of(napi, struct ixgbevf_q_vector, napi); - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *ring; - int found = 0; - - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - return LL_FLUSH_FAILED; - - if (!ixgbevf_qv_lock_poll(q_vector)) - return LL_FLUSH_BUSY; - - ixgbevf_for_each_ring(ring, q_vector->rx) { - found = ixgbevf_clean_rx_irq(q_vector, ring, 4); -#ifdef BP_EXTENDED_STATS - if (found) - ring->stats.cleaned += found; - else - ring->stats.misses++; -#endif - if (found) - break; - } - - ixgbevf_qv_unlock_poll(q_vector); - - return found; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * ixgbevf_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ 
-1930,6 +1878,16 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; + /* request the most inclusive mode we need */ + if (flags & IFF_PROMISC) + xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; + else if (flags & IFF_ALLMULTI) + xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI; + else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + else + xcast_mode = IXGBEVF_XCAST_MODE_NONE; + spin_lock_bh(&adapter->mbx_lock); hw->mac.ops.update_xcast_mode(hw, xcast_mode); @@ -1950,9 +1908,6 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; -#ifdef CONFIG_NET_RX_BUSY_POLL - ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); -#endif napi_enable(&q_vector->napi); } } @@ -1966,12 +1921,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_disable(&q_vector->napi); -#ifdef CONFIG_NET_RX_BUSY_POLL - while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { - pr_info("QV %d locked\n", q_idx); - usleep_range(1000, 20000); - } -#endif /* CONFIG_NET_RX_BUSY_POLL */ } } @@ -2071,7 +2020,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_12, + int api[] = { ixgbe_mbox_api_13, + ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; @@ -2373,6 +2323,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; default: @@ -3228,6 +3179,21 @@ err_setup_reset: } /** + * ixgbevf_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter) +{ + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); +} + +/** * ixgbevf_close - Disables a network interface * @netdev: network interface device structure * @@ -3242,11 +3208,8 @@ int ixgbevf_close(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); + if (netif_device_present(netdev)) + ixgbevf_close_suspend(adapter); return 0; } @@ -3268,6 +3231,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. 
*/ + rtnl_lock(); + if (netif_running(dev)) ixgbevf_close(dev); @@ -3276,6 +3241,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) if (netif_running(dev)) ixgbevf_open(dev); + + rtnl_unlock(); } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, @@ -3796,17 +3763,14 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); - if (netif_running(netdev)) { - rtnl_lock(); - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); - ixgbevf_clear_interrupt_scheme(adapter); - rtnl_unlock(); - } + if (netif_running(netdev)) + ixgbevf_close_suspend(adapter); + + ixgbevf_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -3838,6 +3802,8 @@ static int ixgbevf_resume(struct pci_dev *pdev) dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } + + adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); @@ -3869,8 +3835,8 @@ static void ixgbevf_shutdown(struct pci_dev *pdev) ixgbevf_suspend(pdev, PMSG_SUSPEND); } -static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void ixgbevf_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int start; @@ -3903,8 +3869,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, stats->tx_bytes += bytes; stats->tx_packets += packets; } - - return stats; } #define IXGBEVF_MAX_MAC_HDR_LEN 127 @@ -3953,9 +3917,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ixgbevf_busy_poll_recv, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbevf_netpoll, #endif @@ -4102,6 +4063,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@ -4244,7 +4206,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, } if (netif_running(netdev)) - ixgbevf_down(adapter); + ixgbevf_close_suspend(adapter); if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -4272,6 +4234,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } + adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); @@ -4292,12 +4255,13 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); if (netif_running(netdev)) - ixgbevf_up(adapter); + ixgbevf_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } /* PCI Error Recovery (ERS) */ diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 340cdd469455..bc0442acae78 100644 --- 
a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -84,6 +84,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d46ba1dabcb7..8a5db9d7219d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -330,9 +330,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * Thus return an error if API doesn't support RETA querying or querying * is not supported for this device type. */ - if (hw->api_version != ixgbe_mbox_api_12 || - hw->mac.type >= ixgbe_mac_X550_vf) + switch (hw->api_version) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type >= ixgbe_mac_X550_vf) + break; + default: return -EOPNOTSUPP; + } msgbuf[0] = IXGBE_VF_GET_RETA; @@ -391,9 +396,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * Thus return an error if API doesn't support RSS Random Key retrieval * or if the operation is not supported for this device type. */ - if (hw->api_version != ixgbe_mbox_api_12 || - hw->mac.type >= ixgbe_mac_X550_vf) + switch (hw->api_version) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type >= ixgbe_mac_X550_vf) + break; + default: return -EOPNOTSUPP; + } msgbuf[0] = IXGBE_VF_GET_RSS_KEY; err = hw->mbx.ops.write_posted(hw, msgbuf, 1); @@ -545,6 +555,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) switch (hw->api_version) { case ixgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall threw */ + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -884,6 +899,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return 0; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f9fcab54783c..f580b49e6b67 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1879,7 +1879,7 @@ jme_open(struct net_device *netdev) jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); @@ -2374,7 +2374,7 @@ jme_tx_timeout(struct net_device *netdev) jme->phylink = 0; jme_reset_phy_processor(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); /* * Force to Reset the link again @@ -2648,27 +2648,27 @@ jme_set_wol(struct net_device *netdev, } static int -jme_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); return rc; } static int 
-jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc, fdc = 0; - if (ethtool_cmd_speed(ecmd) == SPEED_1000 - && ecmd->autoneg != AUTONEG_ENABLE) + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; /* @@ -2676,18 +2676,18 @@ jme_set_settings(struct net_device *netdev, * Hardware would not generate link change interrupt. */ if (jme->mii_if.force_media && - ecmd->autoneg != AUTONEG_ENABLE && - (jme->mii_if.full_duplex != ecmd->duplex)) + cmd->base.autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != cmd->base.duplex)) fdc = 1; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); if (!rc) { if (fdc) jme_reset_link(jme); - jme->old_ecmd = *ecmd; + jme->old_cmd = *cmd; set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2716,7 +2716,7 @@ jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) if (!rc && (cmd == SIOCSMIIREG)) { if (duplex_chg) jme_reset_link(jme); - jme_get_settings(netdev, &jme->old_ecmd); + jme_get_link_ksettings(netdev, &jme->old_cmd); set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2915,8 +2915,6 @@ static const struct ethtool_ops jme_ethtool_ops = { .set_pauseparam = jme_set_pauseparam, .get_wol = jme_get_wol, .set_wol = jme_set_wol, - .get_settings = jme_get_settings, - .set_settings = jme_set_settings, .get_link = jme_get_link, .get_msglevel = jme_get_msglevel, .set_msglevel = jme_set_msglevel, @@ -2924,6 +2922,8 @@ static const struct ethtool_ops jme_ethtool_ops = { .get_eeprom_len = jme_get_eeprom_len, .get_eeprom = jme_get_eeprom, .set_eeprom = jme_set_eeprom, + .get_link_ksettings = jme_get_link_ksettings, + .set_link_ksettings = jme_set_link_ksettings, }; static int @@ -3306,7 +3306,7 @@ jme_resume(struct device *dev) jme_clear_pm_disable_wol(jme); jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 58cd67c0c8e4..89535c019f04 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -447,7 +447,7 @@ struct jme_adapter { u8 chip_sub_rev; u8 pcirev; u32 msg_enable; - struct ethtool_cmd old_ecmd; + struct ethtool_link_ksettings old_cmd; unsigned int old_mtu; struct dynpcc_info dpi; atomic_t intr_sem; @@ -1270,8 +1270,8 @@ static inline int new_phy_power_ctrl(u8 chip_main_rev) /* * Function prototypes */ -static int jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd); +static int jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); static void jme_set_unicastaddr(struct net_device *netdev); static void jme_set_multi(struct net_device *netdev); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 8037426ec50f..9fae98caf83a 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -464,7 +464,7 @@ static int korina_poll(struct napi_struct *napi, int budget) work_done = korina_rx(dev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(readl(&lp->rx_dma_regs->dmasm) & ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), @@ -695,25 +695,27 @@ 
static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_gset(&lp->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_sset(&lp->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); korina_set_carrier(&lp->mii_if); @@ -729,9 +731,9 @@ static u32 netdev_get_link(struct net_device *dev) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .get_link = netdev_get_link, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int korina_alloc_ring(struct net_device *dev) diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index faea52da8dae..afc810069440 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget) { struct ltq_etop_chan *ch = container_of(napi, struct ltq_etop_chan, napi); - int rx = 0; - int complete = 0; + int work_done = 0; - while ((rx < budget) && !complete) { + while (work_done < budget) { struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { - ltq_etop_hw_receive(ch); - rx++; - } else { - complete = 1; - } + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) + break; + ltq_etop_hw_receive(ch); + work_done++; } - if (complete || !rx) { - napi_complete(&ch->napi); + if (work_done < budget) { + napi_complete_done(&ch->napi, work_done); ltq_dma_ack_irq(&ch->dma); } - return rx; + return work_done; } static int diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index f4b7cf18fb0f..d2555e8b947e 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -83,9 +83,8 @@ config MVNETA_BM config MVPP2 tristate "Marvell Armada 375 network interface support" - depends on MACH_ARMADA_375 || COMPILE_TEST + depends on ARCH_MVEBU || COMPILE_TEST depends on HAS_DMA - depends on !64BIT select MVMDIO ---help--- This driver supports the network interface units in the diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1fa7c03edec2..25642dee49d3 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1504,9 +1504,7 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, int err; u32 supported, advertising; - err = phy_read_status(dev->phydev); - if (err == 0) - err = phy_ethtool_ksettings_get(dev->phydev, cmd); + err = phy_ethtool_ksettings_get(dev->phydev, cmd); /* * The MAC does not support 1000baseT_Half. 
@@ -2319,7 +2317,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) if (work_done < budget) { if (mp->oom) mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); - napi_complete(napi); + napi_complete_done(napi, work_done); wrlp(mp, INT_MASK, mp->int_mask); } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e05e22705cf7..61dd4462411c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -28,6 +28,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <net/hwbm.h> @@ -224,6 +225,7 @@ #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) #define MVNETA_TXQ_DEC_SENT_SHIFT 16 +#define MVNETA_TXQ_DEC_SENT_MASK 0xff #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) #define MVNETA_TXQ_SENT_DESC_SHIFT 16 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 @@ -525,6 +527,7 @@ struct mvneta_tx_queue { * descriptor ring */ int count; + int pending; int tx_stop_threshold; int tx_wake_threshold; @@ -652,7 +655,7 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp) } /* Get System Network Statistics */ -static struct rtnl_link_stats64 * +static void mvneta_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -686,8 +689,6 @@ mvneta_get_stats64(struct net_device *dev, stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } /* Rx descriptors helper methods */ @@ -820,8 +821,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, /* Only 255 descriptors can be added at once ; Assume caller * process TX desriptors in quanta less than 256 */ - val = pend_desc; + val = pend_desc + txq->pending; mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + txq->pending = 0; } /* Get pointer to next TX descriptor to be processed (send) by HW */ @@ -1758,8 +1760,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, /* Free tx queue skbuffs */ static void mvneta_txq_bufs_free(struct mvneta_port *pp, - struct mvneta_tx_queue *txq, int num) + struct mvneta_tx_queue *txq, int num, + struct netdev_queue *nq) { + unsigned int bytes_compl = 0, pkts_compl = 0; int i; for (i = 0; i < num; i++) { @@ -1767,6 +1771,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, txq->txq_get_index; struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; + if (skb) { + bytes_compl += skb->len; + pkts_compl++; + } + mvneta_txq_inc_get(txq); if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) @@ -1777,6 +1786,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, continue; dev_kfree_skb_any(skb); } + + netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); } /* Handle end of transmission */ @@ -1790,7 +1801,7 @@ static void mvneta_txq_done(struct mvneta_port *pp, if (!tx_done) return; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); txq->count -= tx_done; @@ -2400,12 +2411,18 @@ out: struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); - txq->count += frags; - mvneta_txq_pend_desc_add(pp, txq, frags); + netdev_tx_sent_queue(nq, len); + txq->count += frags; if (txq->count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); + if (!skb->xmit_more || netif_xmit_stopped(nq) || + txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) + 
mvneta_txq_pend_desc_add(pp, txq, frags); + else + txq->pending += frags; + u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; @@ -2424,9 +2441,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); int tx_done = txq->count; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); /* reset txq */ txq->count = 0; @@ -2750,11 +2768,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget) rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); } - budget -= rx_done; - - if (budget > 0) { + if (rx_done < budget) { cause_rx_tx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); if (pp->neta_armada3700) { unsigned long flags; @@ -2952,6 +2968,8 @@ static int mvneta_txq_init(struct mvneta_port *pp, static void mvneta_txq_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + kfree(txq->tx_skb); if (txq->tso_hdrs) @@ -2963,6 +2981,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); + netdev_tx_reset_queue(nq); + txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; @@ -3908,6 +3928,25 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, return 0; } +static void mvneta_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); +} + +static int mvneta_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + if (!dev->phydev) + return -EOPNOTSUPP; + + return phy_ethtool_set_wol(dev->phydev, wol); +} + static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, @@ -3920,7 +3959,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_do_ioctl = mvneta_ioctl, }; -const struct ethtool_ops mvneta_eth_tool_ops = { +static const struct ethtool_ops mvneta_eth_tool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .set_coalesce = mvneta_ethtool_set_coalesce, @@ -3937,6 +3976,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = { .set_rxfh = mvneta_ethtool_set_rxfh, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = mvneta_ethtool_set_link_ksettings, + .get_wol = mvneta_ethtool_get_wol, + .set_wol = mvneta_ethtool_set_wol, }; /* Initialize hw */ diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 4fe430ceb194..d00421b9ffea 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -154,6 +154,7 @@ /* Interrupt Cause and Mask registers */ #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) +#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) @@ -252,12 +253,8 @@ #define MVPP2_SRC_ADDR_HIGH 0x28 #define MVPP2_PHY_AN_CFG0_REG 0x34 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) -#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \ - 0x400 + (port) * 0x400) -#define MVPP2_MIB_LATE_COLLISION 0x7c -#define MVPP2_ISR_SUM_MASK_REG 0x220c #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c -#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 /* Per-port registers */ #define MVPP2_GMAC_CTRL_0_REG 0x0 @@ -513,28 +510,28 @@ enum mvpp2_tag_type { /* Sram result info bits assignment */ #define MVPP2_PRS_RI_MAC_ME_MASK 0x1 #define MVPP2_PRS_RI_DSA_MASK 0x2 -#define MVPP2_PRS_RI_VLAN_MASK 0xc -#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_NONE 0x0 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) -#define MVPP2_PRS_RI_L2_CAST_MASK 0x600 -#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_UCAST 0x0 #define MVPP2_PRS_RI_L2_MCAST BIT(9) #define MVPP2_PRS_RI_L2_BCAST BIT(10) #define MVPP2_PRS_RI_PPPOE_MASK 0x800 -#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000 -#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_UN 0x0 #define MVPP2_PRS_RI_L3_IP4 BIT(12) #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) #define MVPP2_PRS_RI_L3_IP6 BIT(14) #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000 -#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_UCAST 0x0 #define MVPP2_PRS_RI_L3_MCAST BIT(15) #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 @@ -822,9 +819,6 @@ struct mvpp2_tx_queue { /* Per-CPU control of physical Tx queues */ struct mvpp2_txq_pcpu __percpu *pcpu; - /* Array of transmitted skb */ - struct sk_buff **tx_skb; - u32 done_pkts_coal; /* Virtual address of thex Tx DMA descriptors array */ @@ -924,6 +918,7 @@ struct mvpp2_bm_pool { int buf_size; /* Packet size */ int pkt_size; + int frag_size; /* BPPE virtual base address */ u32 *virt_addr; @@ -932,10 +927,6 @@ struct mvpp2_bm_pool { /* Ports using BM pool */ u32 port_map; - - /* Occupied buffers indicator */ - atomic_t in_use; - int in_use_thresh; }; struct mvpp2_buff_hdr { @@ -991,7 +982,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, txq_pcpu->buffs + txq_pcpu->txq_put_index; tx_buf->skb = skb; tx_buf->size = tx_desc->data_size; - tx_buf->phys = tx_desc->buf_phys_addr; + tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset; txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; @@ -3364,6 +3355,22 @@ static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(pool->frag_size); + else + return kmalloc(pool->frag_size, GFP_ATOMIC); +} + +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + skb_free_frag(data); + else + kfree(data); +} + /* Buffer Manager configuration routines */ /* Create pool */ @@ -3381,7 +3388,8 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, if (!bm_pool->virt_addr) return -ENOMEM; - if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) { + if (!IS_ALIGNED((unsigned 
long)bm_pool->virt_addr, + MVPP2_BM_POOL_PTR_ALIGN)) { dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, bm_pool->phys_addr); dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", @@ -3401,7 +3409,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, bm_pool->size = size; bm_pool->pkt_size = 0; bm_pool->buf_num = 0; - atomic_set(&bm_pool->in_use, 0); return 0; } @@ -3427,7 +3434,7 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, for (i = 0; i < bm_pool->buf_num; i++) { dma_addr_t buf_phys_addr; - u32 vaddr; + unsigned long vaddr; /* Get buffer virtual address (indirect access) */ buf_phys_addr = mvpp2_read(priv, @@ -3439,7 +3446,8 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, if (!vaddr) break; - dev_kfree_skb_any((struct sk_buff *)vaddr); + + mvpp2_frag_free(bm_pool, (void *)vaddr); } /* Update BM driver with number of buffers removed from pool */ @@ -3553,29 +3561,28 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } -/* Allocate skb for BM pool */ -static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, - dma_addr_t *buf_phys_addr, - gfp_t gfp_mask) +static void *mvpp2_buf_alloc(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *buf_phys_addr, + gfp_t gfp_mask) { - struct sk_buff *skb; dma_addr_t phys_addr; + void *data; - skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask); - if (!skb) + data = mvpp2_frag_alloc(bm_pool); + if (!data) return NULL; - phys_addr = dma_map_single(port->dev->dev.parent, skb->head, + phys_addr = dma_map_single(port->dev->dev.parent, data, MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) { - dev_kfree_skb_any(skb); + mvpp2_frag_free(bm_pool, data); return NULL; } *buf_phys_addr = phys_addr; - return skb; + return data; } /* Set pool number in a BM cookie */ @@ -3590,14 +3597,15 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) } /* Get pool number from a BM cookie */ -static inline int mvpp2_bm_cookie_pool_get(u32 cookie) +static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) { return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; } /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, - u32 buf_phys_addr, u32 buf_virt_addr) + dma_addr_t buf_phys_addr, + unsigned long buf_virt_addr) { mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr); mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr); @@ -3605,7 +3613,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, /* Release multicast buffer */ static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, - u32 buf_phys_addr, u32 buf_virt_addr, + dma_addr_t buf_phys_addr, + unsigned long buf_virt_addr, int mc_id) { u32 val = 0; @@ -3620,7 +3629,8 @@ static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, /* Refill BM pool */ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, - u32 phys_addr, u32 cookie) + dma_addr_t phys_addr, + unsigned long cookie) { int pool = mvpp2_bm_cookie_pool_get(bm); @@ -3631,10 +3641,9 @@ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, static int mvpp2_bm_bufs_add(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, int buf_num) { - struct sk_buff *skb; int i, buf_size, total_size; - u32 bm; dma_addr_t phys_addr; + void *buf; buf_size = 
MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); total_size = MVPP2_RX_TOTAL_SIZE(buf_size); @@ -3647,18 +3656,17 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, return 0; } - bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id); for (i = 0; i < buf_num; i++) { - skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); - if (!skb) + buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); + if (!buf) break; - mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb); + mvpp2_bm_pool_put(port, bm_pool->id, phys_addr, + (unsigned long)buf); } /* Update BM driver with number of buffers added to pool */ bm_pool->buf_num += i; - bm_pool->in_use_thresh = bm_pool->buf_num / 4; netdev_dbg(port->dev, "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", @@ -3710,6 +3718,9 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, port->priv, new_pool); new_pool->pkt_size = pkt_size; + new_pool->frag_size = + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; /* Allocate buffers for this pool */ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); @@ -3778,6 +3789,8 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) } port_pool->pkt_size = pkt_size; + port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; num = mvpp2_bm_bufs_add(port, port_pool, pkts_num); if (num != pkts_num) { WARN(1, "pool %d: %d of %d allocated\n", @@ -4379,27 +4392,50 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) * will be generated by HW. */ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq, u32 pkts) + struct mvpp2_rx_queue *rxq) { - u32 val; + if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) + rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; - val = (pkts & MVPP2_OCCUPIED_THRESH_MASK); mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); + mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, + rxq->pkts_coal); +} + +static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) +{ + u64 tmp = (u64)clk_hz * usec; + + do_div(tmp, USEC_PER_SEC); + + return tmp > U32_MAX ? U32_MAX : tmp; +} + +static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) +{ + u64 tmp = (u64)cycles * USEC_PER_SEC; + + do_div(tmp, clk_hz); - rxq->pkts_coal = pkts; + return tmp > U32_MAX ? 
U32_MAX : tmp; } /* Set the time delay in usec before Rx interrupt */ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq, u32 usec) + struct mvpp2_rx_queue *rxq) { - u32 val; + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); - val = (port->priv->tclk / USEC_PER_SEC) * usec; - mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); + if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { + rxq->time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + } - rxq->time_coal = usec; + mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); } /* Free Tx queue skbuffs */ @@ -4413,13 +4449,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_get_index; - mvpp2_txq_inc_get(txq_pcpu); - dma_unmap_single(port->dev->dev.parent, tx_buf->phys, tx_buf->size, DMA_TO_DEVICE); - if (!tx_buf->skb) - continue; - dev_kfree_skb_any(tx_buf->skb); + if (tx_buf->skb) + dev_kfree_skb_any(tx_buf->skb); + + mvpp2_txq_inc_get(txq_pcpu); } } @@ -4543,8 +4578,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); /* Set coalescing pkts and time */ - mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); - mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); /* Add number of descriptors ready for receiving packets */ mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); @@ -4994,23 +5029,18 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, - u32 bm, int is_recycle) + struct mvpp2_bm_pool *bm_pool, u32 bm) { - struct sk_buff *skb; dma_addr_t phys_addr; - - if (is_recycle && - (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh)) - return 0; + void *buf; /* No recycle or too many buffers are in use, so allocate a new skb */ - skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); - if (!skb) + buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); + if (!buf) return -ENOMEM; - mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb); - atomic_dec(&bm_pool->in_use); + mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf); + return 0; } @@ -5051,10 +5081,10 @@ static void mvpp2_buff_hdr_rx(struct mvpp2_port *port, struct mvpp2_buff_hdr *buff_hdr; struct sk_buff *skb; u32 rx_status = rx_desc->status; - u32 buff_phys_addr; - u32 buff_virt_addr; - u32 buff_phys_addr_next; - u32 buff_virt_addr_next; + dma_addr_t buff_phys_addr; + unsigned long buff_virt_addr; + dma_addr_t buff_phys_addr_next; + unsigned long buff_virt_addr_next; int mc_id; int pool_id; @@ -5101,14 +5131,17 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); struct mvpp2_bm_pool *bm_pool; struct sk_buff *skb; + unsigned int frag_size; dma_addr_t phys_addr; u32 bm, rx_status; int pool, rx_bytes, err; + void *data; rx_done++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; phys_addr = rx_desc->buf_phys_addr; + data = (void *)(uintptr_t)rx_desc->buf_cookie; bm = mvpp2_bm_cookie_build(rx_desc); pool = mvpp2_bm_cookie_pool_get(bm); @@ -5129,14 +5162,24 @@ static int mvpp2_rx(struct 
mvpp2_port *port, int rx_todo, dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ + mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, rx_desc->buf_cookie); continue; } - skb = (struct sk_buff *)rx_desc->buf_cookie; + if (bm_pool->frag_size > PAGE_SIZE) + frag_size = 0; + else + frag_size = bm_pool->frag_size; + + skb = build_skb(data, frag_size); + if (!skb) { + netdev_warn(port->dev, "skb build failed\n"); + goto err_drop_frame; + } - err = mvpp2_rx_refill(port, bm_pool, bm, 0); + err = mvpp2_rx_refill(port, bm_pool, bm); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); goto err_drop_frame; @@ -5147,9 +5190,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, rcvd_pkts++; rcvd_bytes += rx_bytes; - atomic_inc(&bm_pool->in_use); - skb_reserve(skb, MVPP2_MH_SIZE); + skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); skb_put(skb, rx_bytes); skb->protocol = eth_type_trans(skb, dev); mvpp2_rx_csum(port, rx_status, skb); @@ -5405,7 +5447,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) if (budget > 0) { cause_rx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); mvpp2_interrupts_enable(port); } @@ -5739,7 +5781,7 @@ error: return err; } -static struct rtnl_link_stats64 * +static void mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mvpp2_port *port = netdev_priv(dev); @@ -5771,8 +5813,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_errors = dev->stats.rx_errors; stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -5803,8 +5843,8 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, rxq->time_coal = c->rx_coalesce_usecs; rxq->pkts_coal = c->rx_max_coalesced_frames; - mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); - mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); } for (queue = 0; queue < txq_number; queue++) { @@ -5973,8 +6013,10 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2_tx_queue *txq; txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); - if (!txq) - return -ENOMEM; + if (!txq) { + err = -ENOMEM; + goto err_free_percpu; + } txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); if (!txq->pcpu) { diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3af2814ada23..28cb36d9e50a 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -274,8 +274,6 @@ enum hash_table_entry { HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 }; -static int pxa168_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd); static int pxa168_init_hw(struct pxa168_eth_private *pep); static int pxa168_init_phy(struct net_device *dev); static void eth_port_reset(struct net_device *dev); @@ -987,10 +985,6 @@ static int pxa168_init_phy(struct net_device *dev) if (err) return err; - err = pxa168_get_link_ksettings(dev, &cmd); - if (err) - return err; - cmd.base.phy_address = pep->phy_addr; cmd.base.speed = pep->phy_speed; cmd.base.duplex = pep->phy_duplex; @@ -1261,7 +1255,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) } work_done = rxq_process(dev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); wrl(pep, INT_MASK, ALL_INTS); } @@ -1370,18 
+1364,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, return -EOPNOTSUPP; } -static int pxa168_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - int err; - - err = phy_read_status(dev->phydev); - if (err == 0) - err = phy_ethtool_ksettings_get(dev->phydev, cmd); - - return err; -} - static void pxa168_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1396,7 +1378,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, - .get_link_ksettings = pxa168_get_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9146a514fb33..edb95271a4f2 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw) return supported; } -static int skge_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int skge_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = skge_supported_modes(hw); + supported = skge_supported_modes(hw); if (hw->copper) { - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy_addr; + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; } else - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + + advertising = skge->advertising; + cmd->base.autoneg = skge->autoneg; + cmd->base.speed = skge->speed; + cmd->base.duplex = skge->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - ecmd->advertising = skge->advertising; - ecmd->autoneg = skge->autoneg; - ethtool_cmd_speed_set(ecmd, skge->speed); - ecmd->duplex = skge->duplex; return 0; } -static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int skge_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; u32 supported = skge_supported_modes(hw); int err = 0; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); - if (ecmd->autoneg == AUTONEG_ENABLE) { - ecmd->advertising = supported; + if (cmd->base.autoneg == AUTONEG_ENABLE) { + advertising = supported; skge->duplex = -1; skge->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) 
setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; skge->speed = speed; - skge->duplex = ecmd->duplex; + skge->duplex = cmd->base.duplex; } - skge->autoneg = ecmd->autoneg; - skge->advertising = ecmd->advertising; + skge->autoneg = cmd->base.autoneg; + skge->advertising = advertising; if (netif_running(dev)) { skge_down(dev); @@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom } static const struct ethtool_ops skge_ethtool_ops = { - .get_settings = skge_get_settings, - .set_settings = skge_set_settings, .get_drvinfo = skge_get_drvinfo, .get_regs_len = skge_get_regs_len, .get_regs = skge_get_regs, @@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = { .set_phys_id = skge_set_phys_id, .get_sset_count = skge_get_sset_count, .get_ethtool_stats = skge_get_ethtool_stats, + .get_link_ksettings = skge_get_link_ksettings, + .set_link_ksettings = skge_set_link_ksettings, }; /* @@ -3190,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev) } } -static int skge_poll(struct napi_struct *napi, int to_do) +static int skge_poll(struct napi_struct *napi, int budget) { struct skge_port *skge = container_of(napi, struct skge_port, napi); struct net_device *dev = skge->netdev; @@ -3203,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do) skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); - for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { + for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) { struct skge_rx_desc *rd = e->desc; struct sk_buff *skb; u32 control; @@ -3225,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do) wmb(); skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); - if (work_done < to_do) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - napi_gro_flush(napi, false); spin_lock_irqsave(&hw->hw_lock, flags); - __napi_complete(napi); hw->intr_mask |= napimask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index b60ad0e56a9f..2b2cc3f3ca10 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, sky2->rx_stats.bytes += bytes; u64_stats_update_end(&sky2->rx_stats.syncp); - dev->last_rx = jiffies; + sky2->last_rx = jiffies; sky2_rx_update(netdev_priv(dev), rxqaddr[port]); } @@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev) u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); /* If idle and MAC or PCI is stuck */ - if (sky2->check.last == dev->last_rx && + if (sky2->check.last == sky2->last_rx && ((mac_rp == sky2->check.mac_rp && mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || /* Check if the PCI RX hang */ @@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev) fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); return 1; } else { - sky2->check.last = dev->last_rx; + sky2->check.last = sky2->last_rx; sky2->check.mac_rp = mac_rp; sky2->check.mac_lev = mac_lev; sky2->check.fifo_rp = fifo_rp; @@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) 
| SUPPORTED_1000baseT_Full; } -static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = sky2_supported_modes(hw); - ecmd->phy_address = PHY_ADDR_MARV; + supported = sky2_supported_modes(hw); + cmd->base.phy_address = PHY_ADDR_MARV; if (sky2_is_copper(hw)) { - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, sky2->speed); - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; + cmd->base.port = PORT_TP; + cmd->base.speed = sky2->speed; + supported |= SUPPORTED_Autoneg | SUPPORTED_TP; } else { - ethtool_cmd_speed_set(ecmd, SPEED_1000); - ecmd->port = PORT_FIBRE; - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; } - ecmd->advertising = sky2->advertising; - ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) + advertising = sky2->advertising; + cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) ? AUTONEG_ENABLE : AUTONEG_DISABLE; - ecmd->duplex = sky2->duplex; + cmd->base.duplex = sky2->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; u32 supported = sky2_supported_modes(hw); + u32 new_advertising; - if (ecmd->autoneg == AUTONEG_ENABLE) { - if (ecmd->advertising & ~supported) + ethtool_convert_link_mode_to_legacy_u32(&new_advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (new_advertising & ~supported) return -EINVAL; if (sky2_is_copper(hw)) - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_TP | ADVERTISED_Autoneg; else - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_FIBRE | ADVERTISED_Autoneg; @@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) sky2->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; sky2->speed = speed; - 
sky2->duplex = ecmd->duplex; + sky2->duplex = cmd->base.duplex; sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; } @@ -3888,8 +3900,8 @@ static void sky2_set_multicast(struct net_device *dev) gma_write16(hw, port, GM_RX_CTRL, reg); } -static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void sky2_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -3929,8 +3941,6 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, stats->rx_dropped = dev->stats.rx_dropped; stats->rx_fifo_errors = dev->stats.rx_fifo_errors; stats->tx_fifo_errors = dev->stats.tx_fifo_errors; - - return stats; } /* Can have one global because blinking is controlled by @@ -4407,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) } static const struct ethtool_ops sky2_ethtool_ops = { - .get_settings = sky2_get_settings, - .set_settings = sky2_set_settings, .get_drvinfo = sky2_get_drvinfo, .get_wol = sky2_get_wol, .set_wol = sky2_set_wol, @@ -4431,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = { .set_phys_id = sky2_set_phys_id, .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, + .get_link_ksettings = sky2_get_link_ksettings, + .set_link_ksettings = sky2_set_link_ksettings, }; #ifdef CONFIG_SKY2_DEBUG diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ec6dcd80152b..0fe160796842 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2247,6 +2247,7 @@ struct sky2_port { u16 rx_data_size; u16 rx_nfrags; + unsigned long last_rx; struct { unsigned long last; u32 mac_rp; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 1c29c86f8709..9e757684816d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -462,8 +462,8 @@ static void mtk_stats_update(struct mtk_eth *eth) } } -static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) +static void mtk_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_hw_stats *hw_stats = mac->hw_stats; @@ -494,8 +494,6 @@ static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, storage->tx_errors = dev->stats.tx_errors; storage->rx_dropped = dev->stats.rx_dropped; storage->tx_dropped = dev->stats.tx_dropped; - - return storage; } static inline int mtk_max_frag_size(int mtu) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a49072b4fa52..e8c105164931 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -43,6 +43,7 @@ #include <linux/semaphore.h> #include <rdma/ib_smi.h> #include <linux/delay.h> +#include <linux/etherdevice.h> #include <asm/io.h> @@ -2955,7 +2956,7 @@ static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port, return false; } -int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_vport_state *s_info; @@ -2964,13 +2965,22 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) if (!mlx4_is_master(dev)) return -EPROTONOSUPPORT; + if 
(is_multicast_ether_addr(mac)) + return -EINVAL; + slave = mlx4_get_slave_indx(dev, vf); if (slave < 0) return -EINVAL; port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; - s_info->mac = mac; + + if (s_info->spoofchk && is_zero_ether_addr(mac)) { + mlx4_info(dev, "MAC invalidation is not allowed when spoofchk is on\n"); + return -EPERM; + } + + s_info->mac = mlx4_mac_to_u64(mac); mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n", vf, port, s_info->mac); return 0; @@ -3143,6 +3153,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_vport_state *s_info; int slave; + u8 mac[ETH_ALEN]; if ((!mlx4_is_master(dev)) || !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM)) @@ -3154,6 +3165,13 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + + mlx4_u64_to_mac(mac, s_info->mac); + if (setting && !is_valid_ether_addr(mac)) { + mlx4_info(dev, "Illegal MAC with spoofchk\n"); + return -EPERM; + } + s_info->spoofchk = setting; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 6b8635378f1f..fa6d2354a0e9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -81,8 +81,9 @@ void mlx4_cq_tasklet_cb(unsigned long data) static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) { - unsigned long flags; struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; + unsigned long flags; + bool kick; spin_lock_irqsave(&tasklet_ctx->lock, flags); /* When migrating CQs between EQs will be implemented, please note @@ -92,7 +93,10 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) */ if (list_empty_careful(&cq->tasklet_ctx.list)) { atomic_inc(&cq->refcount); + kick = list_empty(&tasklet_ctx->list); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); + if (kick) + tasklet_schedule(&tasklet_ctx->task); } spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 504461a464c5..024788549c25 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -62,12 +62,13 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, struct skb_shared_hwtstamps *hwts, u64 timestamp) { - unsigned long flags; + unsigned int seq; u64 nsec; - read_lock_irqsave(&mdev->clock_lock, flags); - nsec = timecounter_cyc2time(&mdev->clock, timestamp); - read_unlock_irqrestore(&mdev->clock_lock, flags); + do { + seq = read_seqbegin(&mdev->clock_lock); + nsec = timecounter_cyc2time(&mdev->clock, timestamp); + } while (read_seqretry(&mdev->clock_lock, seq)); memset(hwts, 0, sizeof(struct skb_shared_hwtstamps)); hwts->hwtstamp = ns_to_ktime(nsec); @@ -88,16 +89,23 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev) } } +#define MLX4_EN_WRAP_AROUND_SEC 10UL +/* By scheduling the overflow check every 5 seconds, we have a reasonably + * good chance we won't miss a wrap around. + * TODO: Use a timer instead of a work queue to increase the guarantee.
+ */ +#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2) + void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) { bool timeout = time_is_before_jiffies(mdev->last_overflow_check + - mdev->overflow_period); + MLX4_EN_OVERFLOW_PERIOD); unsigned long flags; if (timeout) { - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_read(&mdev->clock); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); mdev->last_overflow_check = jiffies; } } @@ -128,10 +136,10 @@ static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_read(&mdev->clock); mdev->cycles.mult = neg_adj ? mult - diff : mult + diff; - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); return 0; } @@ -149,9 +157,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) ptp_clock_info); unsigned long flags; - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_adjtime(&mdev->clock, delta); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); return 0; } @@ -172,9 +180,9 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, unsigned long flags; u64 ns; - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); ns = timecounter_read(&mdev->clock); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); *ts = ns_to_timespec64(ns); @@ -198,9 +206,9 @@ static int mlx4_en_phc_settime(struct ptp_clock_info *ptp, unsigned long flags; /* reset the timecounter */ - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_init(&mdev->clock, &mdev->cycles, ns); - write_unlock_irqrestore(&mdev->clock_lock, flags); + write_sequnlock_irqrestore(&mdev->clock_lock, flags); return 0; } @@ -236,7 +244,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { .enable = mlx4_en_phc_enable, }; -#define MLX4_EN_WRAP_AROUND_SEC 10ULL /* This function calculates the max shift that enables the user range * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register. @@ -257,7 +264,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) { struct mlx4_dev *dev = mdev->dev; unsigned long flags; - u64 ns, zero = 0; /* mlx4_en_init_timestamp is called for each netdev. 
* mdev->ptp_clock is common for all ports, skip initialization if @@ -266,7 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) if (mdev->ptp_clock) return; - rwlock_init(&mdev->clock_lock); + seqlock_init(&mdev->clock_lock); memset(&mdev->cycles, 0, sizeof(mdev->cycles)); mdev->cycles.read = mlx4_en_read_clock; @@ -276,17 +282,10 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); mdev->nominal_c_mult = mdev->cycles.mult; - write_lock_irqsave(&mdev->clock_lock, flags); + write_seqlock_irqsave(&mdev->clock_lock, flags); timecounter_init(&mdev->clock, &mdev->cycles, ktime_to_ns(ktime_get_real())); - write_unlock_irqrestore(&mdev->clock_lock, flags); - - /* Calculate period in seconds to call the overflow watchdog - to make - * sure counter is checked at least once every wrap around. - */ - ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero); - do_div(ns, NSEC_PER_SEC / 2 / HZ); - mdev->overflow_period = ns; + write_sequnlock_irqrestore(&mdev->clock_lock, flags); /* Configure the PHC */ mdev->ptp_clock_info = mlx4_en_ptp_clock_info; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index b04760a5034b..1dae8e40fb25 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -319,7 +319,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) default: en_err(priv, "TC[%d]: Not supported TSA: %d\n", i, ets->tc_tsa[i]); - return -ENOTSUPP; + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 9aa422691954..c4d714fcc7da 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -902,6 +902,7 @@ mlx4_en_set_link_ksettings(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; __be32 proto_admin; + u8 cur_autoneg; int ret; u32 ptys_adv = ethtool2ptys_link_modes( @@ -931,10 +932,21 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return 0; } - proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ? - cpu_to_be32(ptys_adv) : - speed_set_ptys_admin(priv, speed, - ptys_reg.eth_proto_cap); + cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ? 
+ AUTONEG_DISABLE : AUTONEG_ENABLE; + + if (link_ksettings->base.autoneg == AUTONEG_DISABLE) { + proto_admin = speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + if ((be32_to_cpu(proto_admin) & + (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) | + MLX4_PROT_MASK(MLX4_1000BASE_KX))) && + (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP)) + ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN; + } else { + proto_admin = cpu_to_be32(ptys_adv); + ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN; + } proto_admin &= ptys_reg.eth_proto_cap; if (!proto_admin) { @@ -942,7 +954,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return -EINVAL; /* nothing to change due to bad input */ } - if (proto_admin == ptys_reg.eth_proto_admin) + if ((proto_admin == ptys_reg.eth_proto_admin) && + ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) && + (link_ksettings->base.autoneg == cur_autoneg))) return 0; /* Nothing to change */ en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", @@ -1788,7 +1802,7 @@ static int mlx4_en_set_channels(struct net_device *dev, netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); - if (dev->num_tc) + if (netdev_get_num_tc(dev)) mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]); @@ -1980,7 +1994,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; default: - return -ENOSYS; + return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3b4961a8e8e4..61420473fe5f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1321,7 +1321,7 @@ static void mlx4_en_tx_timeout(struct net_device *dev) } -static struct rtnl_link_stats64 * +static void mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1330,8 +1330,6 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx4_en_fold_software_stats(dev); netdev_stats_to_stats64(stats, &dev->stats); spin_unlock_bh(&priv->stats_lock); - - return stats; } static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) @@ -1384,6 +1382,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) { unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); + u32 pkt_rate_high, pkt_rate_low; struct mlx4_en_cq *cq; unsigned long packets; unsigned long rate; @@ -1397,37 +1396,40 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) return; + pkt_rate_low = READ_ONCE(priv->pkt_rate_low); + pkt_rate_high = READ_ONCE(priv->pkt_rate_high); + for (ring = 0; ring < priv->rx_ring_num; ring++) { rx_packets = READ_ONCE(priv->rx_ring[ring]->packets); rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); - rx_pkt_diff = ((unsigned long) (rx_packets - - priv->last_moder_packets[ring])); + rx_pkt_diff = rx_packets - priv->last_moder_packets[ring]; packets = rx_pkt_diff; rate = packets * HZ / period; - avg_pkt_size = packets ? ((unsigned long) (rx_bytes - - priv->last_moder_bytes[ring])) / packets : 0; + avg_pkt_size = packets ? 
(rx_bytes - + priv->last_moder_bytes[ring]) / packets : 0; /* Apply auto-moderation only when packet rate * exceeds a rate that it matters */ if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { - if (rate < priv->pkt_rate_low) + if (rate <= pkt_rate_low) moder_time = priv->rx_usecs_low; - else if (rate > priv->pkt_rate_high) + else if (rate >= pkt_rate_high) moder_time = priv->rx_usecs_high; else - moder_time = (rate - priv->pkt_rate_low) * + moder_time = (rate - pkt_rate_low) * (priv->rx_usecs_high - priv->rx_usecs_low) / - (priv->pkt_rate_high - priv->pkt_rate_low) + + (pkt_rate_high - pkt_rate_low) + priv->rx_usecs_low; } else { moder_time = priv->rx_usecs_low; } - if (moder_time != priv->last_moder_time[ring]) { + cq = priv->rx_cq[ring]; + if (moder_time != priv->last_moder_time[ring] || + cq->moder_cnt != priv->rx_frames) { priv->last_moder_time[ring] = moder_time; - cq = priv->rx_cq[ring]; cq->moder_time = moder_time; cq->moder_cnt = priv->rx_frames; err = mlx4_en_set_cq_moder(priv, cq); @@ -1697,6 +1699,14 @@ int mlx4_en_start_port(struct net_device *dev) priv->port, err); goto tx_err; } + + err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu); + if (err) { + en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n", + dev->mtu, priv->port, err); + goto tx_err; + } + /* Set default qp number */ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); if (err) { @@ -2475,12 +2485,8 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) { struct mlx4_en_priv *en_priv = netdev_priv(dev); struct mlx4_en_dev *mdev = en_priv->mdev; - u64 mac_u64 = mlx4_mac_to_u64(mac); - - if (is_multicast_ether_addr(mac)) - return -EINVAL; - return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac); } static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 040da4b16b1c..930f961fee42 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -35,7 +35,6 @@ #define _MLX4_EN_PORT_H_ -#define SET_PORT_GEN_ALL_VALID 0x7 #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index cc003fdf0ed9..867292880c07 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -33,6 +33,7 @@ #include <net/busy_poll.h> #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> @@ -603,10 +604,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size, DMA_FROM_DEVICE); - /* Save page reference in skb */ - __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); - skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); - skb_frags_rx[nr].page_offset = frags[nr].page_offset; + __skb_fill_page_desc(skb, nr, frags[nr].page, + frags[nr].page_offset, + frag_info->frag_size); + skb->truesize += frag_info->frag_stride; frags[nr].page = NULL; } @@ -709,7 +710,8 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, do { if (mlx4_en_prepare_rx_desc(priv, ring, ring->prod & ring->size_mask, - GFP_ATOMIC | __GFP_COLD)) + GFP_ATOMIC | 
__GFP_COLD | + __GFP_MEMALLOC)) break; ring->prod++; } while (--missing); @@ -928,10 +930,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud length, cq->ring, &doorbell_pending))) goto consumed; + trace_xdp_exception(dev, xdp_prog, act); goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(dev, xdp_prog, act); case XDP_DROP: ring->xdp_drop++; xdp_drop_no_cnt: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 5886ad78058f..3ed42199d3f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -710,7 +710,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, u16 rings_p_up = priv->num_tx_rings_p_up; u8 up = 0; - if (dev->num_tc) + if (netdev_get_num_tc(dev)) return skb_tx_hash(dev, skb); if (skb_vlan_tag_present(skb)) diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 0509996957d9..07406cf2eacd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -494,7 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eqe *eqe; - int cqn = -1; + int cqn; int eqes_found = 0; int set_ci = 0; int port; @@ -840,13 +840,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq_set_ci(eq, 1); - /* cqn is 24bit wide but is initialized such that its higher bits - * are ones too. Thus, if we got any event, cqn's high bits should be off - * and we need to schedule the tasklet. - */ - if (!(cqn & ~0xffffff)) - tasklet_schedule(&eq->tasklet_ctx.task); - return eqes_found; } @@ -1256,9 +1249,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) mlx4_warn(dev, "Failed adding irq rmap\n"); } #endif - err = mlx4_create_eq(dev, dev->caps.num_cqs - - dev->caps.reserved_cqs + - MLX4_NUM_SPARE_EQE, + err = mlx4_create_eq(dev, dev->quotas.cq + + MLX4_NUM_SPARE_EQE, (dev->flags & MLX4_FLAG_MSI_X) ? 
i + 1 - !!(i > MLX4_EQ_ASYNC) : 0, eq); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 84bab9f0732e..37e84a59e751 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -672,7 +672,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); func_cap->physical_port = field; if (func_cap->physical_port != gen_or_port) { - err = -ENOSYS; + err = -EINVAL; goto out; } @@ -1875,7 +1875,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = - (ilog2(cache_line_size()) - 4) << 5; + ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); #if defined(__LITTLE_ENDIAN) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); @@ -2436,7 +2436,7 @@ int mlx4_config_dev_retrieval(struct mlx4_dev *dev, #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) - return -ENOTSUPP; + return -EOPNOTSUPP; err = mlx4_CONFIG_DEV_get(dev, &config_dev); if (err) @@ -2983,7 +2983,7 @@ static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; + context->flags2 |= SET_PORT_GEN_PHV_VALID; if (phv_bit) context->phv_en |= SET_PORT_GEN_PHV_EN; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 8258d08acd8c..e00f627331cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -136,7 +136,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable) LIST_HEAD(bond_list); if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) - return -ENOTSUPP; + return -EOPNOTSUPP; ret = mlx4_disable_rx_port_check(dev, enable); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bffa6f345f2f..21377c315083 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -838,11 +838,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) */ if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); - return -ENOSYS; + return -EINVAL; } - mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; - dev->caps.hca_core_clock = hca_param.hca_core_clock; memset(&dev_cap, 0, sizeof(dev_cap)); @@ -896,7 +894,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) PF_CONTEXT_BEHAVIOUR_MASK) { mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); - return -ENOSYS; + return -EINVAL; } dev->caps.num_ports = func_cap.num_ports; @@ -1447,7 +1445,7 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) int err; if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) - return -ENOTSUPP; + return -EOPNOTSUPP; mutex_lock(&priv->bond_mutex); @@ -1884,7 +1882,7 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev, struct mlx4_priv *priv = mlx4_priv(dev); if (mlx4_is_slave(dev)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (!params) return -EINVAL; @@ -2384,7 +2382,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) /* Query CONFIG_DEV parameters */ err = mlx4_config_dev_retrieval(dev, ¶ms); - if (err && err != -ENOTSUPP) { + if (err && err != -EOPNOTSUPP) { mlx4_err(dev, 
"Failed to query CONFIG_DEV parameters\n"); } else if (!err) { dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; @@ -3492,7 +3490,7 @@ slave_start: mlx4_enable_msi_x(dev); if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { - err = -ENOSYS; + err = -EOPNOTSUPP; mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; } @@ -3503,6 +3501,8 @@ slave_start: goto err_disable_msix; } + mlx4_init_quotas(dev); + err = mlx4_setup_hca(dev); if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && !mlx4_is_mfunc(dev)) { @@ -3515,7 +3515,6 @@ slave_start: if (err) goto err_steer; - mlx4_init_quotas(dev); /* When PF resources are ready arm its comm channel to enable * getting commands */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 086920b615af..b4f1bc56cc68 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -487,6 +487,7 @@ struct mlx4_slave_state { bool vst_qinq_supported; u8 function; dma_addr_t vhcr_dma; + u16 user_mtu[MLX4_MAX_PORTS + 1]; u16 mtu[MLX4_MAX_PORTS + 1]; __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; @@ -590,6 +591,7 @@ struct mlx4_mfunc_master_ctx { struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; int init_port_ref[MLX4_MAX_PORTS + 1]; u16 max_mtu[MLX4_MAX_PORTS + 1]; + u16 max_user_mtu[MLX4_MAX_PORTS + 1]; u8 pptx; u8 pprx; int disable_mcast_ref[MLX4_MAX_PORTS + 1]; @@ -774,7 +776,9 @@ struct mlx4_vlan_table { int max; }; -#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK | \ + MLX4_FLAG_V_PPRX_MASK | \ + MLX4_FLAG_V_PPTX_MASK) #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 @@ -787,7 +791,7 @@ enum { struct mlx4_set_port_general_context { u16 reserved1; - u8 v_ignore_fcs; + u8 flags2; u8 flags; union { u8 ignore_fcs; @@ -803,7 +807,8 @@ struct mlx4_set_port_general_context { u16 reserved4; u32 reserved5; u8 phv_en; - u8 reserved6[3]; + u8 reserved6[5]; + __be16 user_mtu; }; struct mlx4_set_port_rqp_calc_context { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index cec59bc264c9..3629ce11a68b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -102,7 +102,8 @@ /* Use the maximum between 16384 and a single page */ #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) -#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER +#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \ + PAGE_ALLOC_COSTLY_ORDER) /* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU * and 4K allocations) */ @@ -424,12 +425,11 @@ struct mlx4_en_dev { u32 priv_pdn; spinlock_t uar_lock; u8 mac_removed[MLX4_MAX_PORTS + 1]; - rwlock_t clock_lock; u32 nominal_c_mult; struct cyclecounter cycles; + seqlock_t clock_lock; struct timecounter clock; unsigned long last_overflow_check; - unsigned long overflow_period; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; struct notifier_block nb; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 395b5463cfd9..db65f72879e9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -823,7 +823,7 @@ int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) || (type == MLX4_MW_TYPE_2 
&& !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN))) - return -ENOTSUPP; + return -EOPNOTSUPP; index = mlx4_mpt_reserve(dev); if (index == -1) diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index b656dd5772e5..4e36e287d605 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -50,7 +50,11 @@ #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL -#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 +#define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1) +#define MLX4_FLAG2_V_USER_MTU_MASK BIT(5) +#define MLX4_FLAG_V_MTU_MASK BIT(0) +#define MLX4_FLAG_V_PPRX_MASK BIT(1) +#define MLX4_FLAG_V_PPTX_MASK BIT(2) #define MLX4_IGNORE_FCS_MASK 0x1 #define MLX4_TC_MAX_NUMBER 8 @@ -1239,13 +1243,96 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) return; } +static void +mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 mtu, prev_mtu; + + /* Mtu is configured as the max MTU among all + * the functions on the port. + */ + mtu = be16_to_cpu(gen_context->mtu); + mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + prev_mtu = slave_st->mtu[port]; + slave_st->mtu[port] = mtu; + if (mtu > master->max_mtu[port]) + master->max_mtu[port] = mtu; + if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) { + int i; + + slave_st->mtu[port] = mtu; + master->max_mtu[port] = mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_mtu[port] = + max_t(u16, master->max_mtu[port], + master->slave_state[i].mtu[port]); + } + gen_context->mtu = cpu_to_be16(master->max_mtu[port]); +} + +static void +mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 user_mtu, prev_user_mtu; + + /* User Mtu is configured as the max USER_MTU among all + * the functions on the port. 
+ */ + user_mtu = be16_to_cpu(gen_context->user_mtu); + user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]); + prev_user_mtu = slave_st->user_mtu[port]; + slave_st->user_mtu[port] = user_mtu; + if (user_mtu > master->max_user_mtu[port]) + master->max_user_mtu[port] = user_mtu; + if (user_mtu < prev_user_mtu && + prev_user_mtu == master->max_user_mtu[port]) { + int i; + + slave_st->user_mtu[port] = user_mtu; + master->max_user_mtu[port] = user_mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_user_mtu[port] = + max_t(u16, master->max_user_mtu[port], + master->slave_state[i].user_mtu[port]); + } + gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]); +} + +static void +mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + + /* Slave cannot change Global Pause configuration */ + if (slave != mlx4_master_func_num(dev) && + (gen_context->pptx != master->pptx || + gen_context->pprx != master->pprx)) { + gen_context->pptx = master->pptx; + gen_context->pprx = master->pprx; + mlx4_warn(dev, "denying Global Pause change for slave:%d\n", + slave); + } else { + master->pptx = gen_context->pptx; + master->pprx = gen_context->pprx; + } +} + static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_port_info *port_info; - struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; - struct mlx4_slave_state *slave_st = &master->slave_state[slave]; struct mlx4_set_port_rqp_calc_context *qpn_context; struct mlx4_set_port_general_context *gen_context; struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; @@ -1256,7 +1343,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, int base; u32 in_modifier; u32 promisc; - u16 mtu, prev_mtu; int err; int i, j; int offset; @@ -1269,7 +1355,9 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, is_eth = op_mod; port_info = &priv->port[port]; - /* Slaves cannot perform SET_PORT operations except changing MTU */ + /* Slaves cannot perform SET_PORT operations, + * except for changing MTU and USER_MTU. + */ if (is_eth) { if (slave != dev->caps.function && in_modifier != MLX4_SET_PORT_GENERAL && @@ -1297,40 +1385,20 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, break; case MLX4_SET_PORT_GENERAL: gen_context = inbox->buf; - /* Mtu is configured as the max MTU among all the - * the functions on the port. 
*/ - mtu = be16_to_cpu(gen_context->mtu); - mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + - ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); - prev_mtu = slave_st->mtu[port]; - slave_st->mtu[port] = mtu; - if (mtu > master->max_mtu[port]) - master->max_mtu[port] = mtu; - if (mtu < prev_mtu && prev_mtu == - master->max_mtu[port]) { - slave_st->mtu[port] = mtu; - master->max_mtu[port] = mtu; - for (i = 0; i < dev->num_slaves; i++) { - master->max_mtu[port] = - max(master->max_mtu[port], - master->slave_state[i].mtu[port]); - } - } - gen_context->mtu = cpu_to_be16(master->max_mtu[port]); - /* Slave cannot change Global Pause configuration */ - if (slave != mlx4_master_func_num(dev) && - ((gen_context->pptx != master->pptx) || - (gen_context->pprx != master->pprx))) { - gen_context->pptx = master->pptx; - gen_context->pprx = master->pprx; - mlx4_warn(dev, - "denying Global Pause change for slave:%d\n", - slave); - } else { - master->pptx = gen_context->pptx; - master->pprx = gen_context->pprx; - } + if (gen_context->flags & MLX4_FLAG_V_MTU_MASK) + mlx4_en_set_port_mtu(dev, slave, port, + gen_context); + + if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK) + mlx4_en_set_port_user_mtu(dev, slave, port, + gen_context); + + if (gen_context->flags & + (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK)) + mlx4_en_set_port_global_pause(dev, slave, + gen_context); + break; case MLX4_SET_PORT_GID_TABLE: /* change to MULTIPLE entries: number of guest's gids @@ -1608,6 +1676,30 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, } EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); +int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK; + context->user_mtu = cpu_to_be16(user_mtu); + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu); + int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) { struct mlx4_cmd_mailbox *mailbox; @@ -1619,7 +1711,7 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; + context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK; if (ignore_fcs_value) context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; else diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index d1cd9c32a9ae..2d6abd4662b1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -447,7 +447,7 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) { mlx4_warn(dev, "Trying to set src check LB, but it isn't supported\n"); - err = -ENOTSUPP; + err = -EOPNOTSUPP; goto out; } pri_addr_path_mask |= diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1822382212ee..d8d5d161b8c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -77,6 +77,7 @@ struct 
res_common { int from_state; int to_state; int removing; + const char *func_name; }; enum { @@ -236,8 +237,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id) struct rb_node *node = root->rb_node; while (node) { - struct res_common *res = container_of(node, struct res_common, - node); + struct res_common *res = rb_entry(node, struct res_common, + node); if (res_id < res->res_id) node = node->rb_left; @@ -255,8 +256,8 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res) /* Figure out where to put new node */ while (*new) { - struct res_common *this = container_of(*new, struct res_common, - node); + struct res_common *this = rb_entry(*new, struct res_common, + node); parent = *new; if (res->res_id < this->res_id) @@ -837,6 +838,36 @@ static int mpt_mask(struct mlx4_dev *dev) return dev->caps.num_mpts - 1; } +static const char *mlx4_resource_type_to_str(enum mlx4_resource t) +{ + switch (t) { + case RES_QP: + return "QP"; + case RES_CQ: + return "CQ"; + case RES_SRQ: + return "SRQ"; + case RES_XRCD: + return "XRCD"; + case RES_MPT: + return "MPT"; + case RES_MTT: + return "MTT"; + case RES_MAC: + return "MAC"; + case RES_VLAN: + return "VLAN"; + case RES_COUNTER: + return "COUNTER"; + case RES_FS_RULE: + return "FS_RULE"; + case RES_EQ: + return "EQ"; + default: + return "INVALID RESOURCE"; + } +} + static void *find_res(struct mlx4_dev *dev, u64 res_id, enum mlx4_resource type) { @@ -846,9 +877,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id, res_id); } -static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, - enum mlx4_resource type, - void *res) +static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type, + void *res, const char *func_name) { struct res_common *r; int err = 0; @@ -861,6 +892,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, } if (r->state == RES_ANY_BUSY) { + mlx4_warn(dev, + "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n", + func_name, slave, res_id, mlx4_resource_type_to_str(type), + r->func_name); err = -EBUSY; goto exit; } @@ -872,6 +907,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, r->from_state = r->state; r->state = RES_ANY_BUSY; + r->func_name = func_name; if (res) *((struct res_common **)res) = r; @@ -881,6 +917,9 @@ exit: return err; } +#define get_res(dev, slave, res_id, type, res) \ + _get_res((dev), (slave), (res_id), (type), (res), __func__) + int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource type, u64 res_id, int *slave) @@ -911,8 +950,10 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, res_id, type); - if (r) + if (r) { r->state = r->from_state; + r->func_name = ""; + } spin_unlock_irq(mlx4_tlock(dev)); } @@ -1396,7 +1437,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) case RES_MTT: return remove_mtt_ok((struct res_mtt *)res, extra); case RES_MAC: - return -ENOSYS; + return -EOPNOTSUPP; case RES_EQ: return remove_eq_ok((struct res_eq *)res); case RES_COUNTER: @@ -4256,7 +4297,7 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) { mlx4_warn(dev, "Src check LB for slave %d isn't supported\n", slave); - return -ENOTSUPP; + return -EOPNOTSUPP; } /* Just change the smac for the QP */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 
32d4af9b594d..336d4738b807 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -179,6 +179,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", cq->cqn); + cq->uar = dev->priv.uar; + return 0; err_cmd: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index a9dbc28f6b97..a62f4b6a21a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -71,6 +71,16 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (dev_ctx->intf->pfault) { + if (priv->pfault) { + mlx5_core_err(dev, "multiple page fault handlers not supported"); + } else { + priv->pfault_ctx = dev_ctx->context; + priv->pfault = dev_ctx->intf->pfault; + } + } +#endif spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); @@ -97,6 +107,15 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (!dev_ctx) return; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + spin_lock_irq(&priv->ctx_lock); + if (priv->pfault == dev_ctx->intf->pfault) + priv->pfault = NULL; + spin_unlock_irq(&priv->ctx_lock); + + synchronize_srcu(&priv->pfault_srcu); +#endif + spin_lock_irq(&priv->ctx_lock); list_del(&dev_ctx->list); spin_unlock_irq(&priv->ctx_lock); @@ -329,6 +348,20 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, spin_unlock_irqrestore(&priv->ctx_lock, flags); } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +void mlx5_core_page_fault(struct mlx5_core_dev *dev, + struct mlx5_pagefault *pfault) +{ + struct mlx5_priv *priv = &dev->priv; + int srcu_idx; + + srcu_idx = srcu_read_lock(&priv->pfault_srcu); + if (priv->pfault) + priv->pfault(dev, priv->pfault_ctx, pfault); + srcu_read_unlock(&priv->pfault_srcu, srcu_idx); +} +#endif + void mlx5_dev_list_lock(void) { mutex_lock(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d5ecb8f53fd4..f6a6ded204f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -51,6 +51,9 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) +#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) +#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) + #define MLX5E_MAX_NUM_TC 8 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 @@ -67,8 +70,13 @@ #define MLX5_RX_HEADROOM NET_SKB_PAD -#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */ -#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */ +#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ + (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ +#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \ + max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) +#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6) +#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8) + #define MLX5_MPWRQ_LOG_WQE_SZ 18 #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? 
\ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) @@ -98,6 +106,7 @@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) +#define MLX5E_MIN_NUM_CHANNELS 0x1 #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 @@ -111,8 +120,7 @@ #define MLX5E_XDP_IHS_DS_COUNT \ DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT \ - (MLX5E_XDP_IHS_DS_COUNT + \ - (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) + ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) #define MLX5E_XDP_TX_WQEBBS \ DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS) @@ -259,6 +267,7 @@ struct mlx5e_tstamp { struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; + u8 *pps_pin_caps; }; enum { @@ -369,6 +378,7 @@ struct mlx5e_rq { unsigned long state; int ix; + u16 rx_headroom; struct mlx5e_rx_am am; /* Adaptive Moderation */ struct bpf_prog *xdp_prog; @@ -479,7 +489,7 @@ struct mlx5e_sq { /* control path */ struct mlx5_wq_ctrl wq_ctrl; - struct mlx5_uar uar; + struct mlx5_sq_bfreg bfreg; struct mlx5e_channel *channel; int tc; u32 rate_limit; @@ -568,8 +578,9 @@ struct mlx5e_vlan_table { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; - struct mlx5_flow_handle *any_vlan_rule; - bool filter_disabled; + struct mlx5_flow_handle *any_cvlan_rule; + struct mlx5_flow_handle *any_svlan_rule; + bool filter_disabled; }; struct mlx5e_l2_table { @@ -777,9 +788,11 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, struct skb_shared_hwtstamps *hwts); void mlx5e_timestamp_init(struct mlx5e_priv *priv); void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); @@ -803,11 +816,12 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { - u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; + u16 ofst = sq->bf_offset; /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); @@ -833,7 +847,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) struct mlx5_core_cq *mcq; mcq = &cq->mcq; - mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); + mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); } static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) @@ -841,12 +855,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); } -static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) -{ - return min_t(int, mdev->priv.eq_table.num_comp_vectors, - MLX5E_MAX_NUM_CHANNELS); -} - extern const struct ethtool_ops 
mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 746a92c13644..37e66eef6fb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -37,6 +37,22 @@ enum { MLX5E_CYCLES_SHIFT = 23 }; +enum { + MLX5E_PIN_MODE_IN = 0x0, + MLX5E_PIN_MODE_OUT = 0x1, +}; + +enum { + MLX5E_OUT_PATTERN_PULSE = 0x0, + MLX5E_OUT_PATTERN_PERIODIC = 0x1, +}; + +enum { + MLX5E_EVENT_MODE_DISABLE = 0x0, + MLX5E_EVENT_MODE_REPETETIVE = 0x1, + MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, +}; + void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, struct skb_shared_hwtstamps *hwts) { @@ -90,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + mutex_lock(&priv->state_lock); /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def); + mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -112,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Disable CQE compression */ netdev_warn(dev, "Disabling cqe compression"); - mlx5e_modify_rx_cqe_compression(priv, false); + mlx5e_modify_rx_cqe_compression_locked(priv, false); config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: + mutex_unlock(&priv->state_lock); return -ERANGE; } memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); + mutex_unlock(&priv->state_lock); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; @@ -189,6 +208,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + + if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + /* For future use need to add a loop for finding all 1PPS out pins */ + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); + + mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + } if (delta < 0) { neg_adj = 1; @@ -208,6 +239,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) return 0; } +static int mlx5e_extts_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u8 pattern = 0; + int pin = -1; + int err = 0; + + if (!MLX5_CAP_GEN(priv->mdev, pps) || + !MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->extts.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + } + + if (rq->extts.flags & PTP_FALLING_EDGE) + pattern = 1; + + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); + MLX5_SET(mtpps_reg, in, pattern, pattern); + MLX5_SET(mtpps_reg, in, enable, on); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; + + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_perout_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u64 nsec_now, nsec_delta, time_stamp; + u64 cycles_now, cycles_delta; + struct timespec64 ts; + unsigned long flags; + int pin = -1; + s64 ns; + + if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->perout.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + if (on) + if ((ns >> 1) != 500000000LL) + return -EINVAL; + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + ns = timespec64_to_ns(&ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + write_unlock_irqrestore(&tstamp->lock, flags); + time_stamp = cycles_now + cycles_delta; + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); + MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); + + return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); +} + +static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, + struct 
ptp_clock_request *rq, + int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + return mlx5e_extts_configure(ptp, rq, on); + case PTP_CLK_REQ_PEROUT: + return mlx5e_perout_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; +} + static const struct ptp_clock_info mlx5e_ptp_clock_info = { .owner = THIS_MODULE, .max_adj = 100000000, @@ -221,6 +370,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = { .gettime64 = mlx5e_ptp_gettime, .settime64 = mlx5e_ptp_settime, .enable = NULL, + .verify = NULL, }; static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) @@ -229,6 +379,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; } +static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) +{ + int i; + + tstamp->ptp_info.pin_config = + kzalloc(sizeof(*tstamp->ptp_info.pin_config) * + tstamp->ptp_info.n_pins, GFP_KERNEL); + if (!tstamp->ptp_info.pin_config) + return -ENOMEM; + tstamp->ptp_info.enable = mlx5e_ptp_enable; + tstamp->ptp_info.verify = mlx5e_ptp_verify; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + snprintf(tstamp->ptp_info.pin_config[i].name, + sizeof(tstamp->ptp_info.pin_config[i].name), + "mlx5_pps%d", i); + tstamp->ptp_info.pin_config[i].index = i; + tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE; + tstamp->ptp_info.pin_config[i].chan = i; + } + + return 0; +} + +static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, + struct mlx5e_tstamp *tstamp) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + mlx5_query_mtpps(priv->mdev, out, sizeof(out)); + + tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, + cap_number_of_pps_pins); + tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_in_pins); + tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_out_pins); + + tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); + tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); +} + +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + + ptp_clock_event(tstamp->ptp, event); +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { struct mlx5e_tstamp *tstamp = &priv->tstamp; @@ -272,6 +478,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) tstamp->ptp_info = mlx5e_ptp_clock_info; snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + /* Initialize 1PPS data structures */ +#define MAX_PIN_NUM 8 + tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); + if (tstamp->pps_pin_caps) { + if (MLX5_CAP_GEN(priv->mdev, pps)) + mlx5e_get_pps_caps(priv, tstamp); + if (tstamp->ptp_info.n_pins) + mlx5e_init_pin_config(tstamp); + } else { + mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); + } + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, &priv->mdev->pdev->dev); if (IS_ERR(tstamp->ptp)) 
{ @@ -293,5 +511,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) priv->tstamp.ptp = NULL; } + kfree(tstamp->pps_pin_caps); + kfree(tstamp->ptp_info.pin_config); + cancel_delayed_work_sync(&tstamp->overflow_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index f175518ff07a..bd898d8deda0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -89,16 +89,10 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) struct mlx5e_resources *res = &mdev->mlx5e_res; int err; - err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false); - if (err) { - mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - return err; - } - err = mlx5_core_alloc_pd(mdev, &res->pdn); if (err) { mlx5_core_err(mdev, "alloc pd failed, %d\n", err); - goto err_unmap_free_uar; + return err; } err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn); @@ -121,9 +115,6 @@ err_dealloc_transport_domain: mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); err_dealloc_pd: mlx5_core_dealloc_pd(mdev, res->pdn); -err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &res->cq_uar); - return err; } @@ -134,7 +125,6 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) mlx5_core_destroy_mkey(mdev, &res->mkey); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_pd(mdev, res->pdn); - mlx5_unmap_free_uar(mdev, &res->cq_uar); } int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index bb67863aa361..a004a5a1a4c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -170,7 +170,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return NUM_SW_COUNTERS + MLX5E_NUM_Q_CNTRS(priv) + - NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + + NUM_PCIE_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv) + @@ -218,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); + + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -330,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, pport_2819_stats_desc, i); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); + + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], @@ -535,7 +552,7 @@ static void 
mlx5e_get_channels(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - ch->max_combined = mlx5e_get_max_num_channels(priv->mdev); + ch->max_combined = priv->profile->max_nch(priv->mdev); ch->combined_count = priv->params.num_channels; } @@ -1459,8 +1476,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - int err = 0; - bool reset; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; @@ -1470,17 +1485,11 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - - if (reset) - mlx5e_close_locked(netdev); - - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable); + mlx5e_modify_rx_cqe_compression_locked(priv, enable); priv->params.rx_cqe_compress_def = enable; + mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type); - if (reset) - err = mlx5e_open_locked(netdev); - return err; + return 0; } static int mlx5e_handle_pflag(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index a0e5a69402b3..f2762e45c8ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_UNTAGGED, - MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, + MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, MLX5E_VLAN_RULE_TYPE_MATCH_VID, }; @@ -172,19 +173,31 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, dest.ft = priv->fs.l2.ft.t; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: rule_p = &priv->fs.vlan.untagged_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - rule_p = &priv->fs.vlan.any_vlan_rule; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + rule_p = &priv->fs.vlan.any_cvlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); + break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + rule_p = &priv->fs.vlan.any_svlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.svlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ rule_p = &priv->fs.vlan.active_vlans_rule[vid]; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, @@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, priv->fs.vlan.untagged_rule = NULL; } break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - if (priv->fs.vlan.any_vlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule); - priv->fs.vlan.any_vlan_rule = NULL; + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + if 
(priv->fs.vlan.any_cvlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule); + priv->fs.vlan.any_cvlan_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + if (priv->fs.vlan.any_svlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule); + priv->fs.vlan.any_svlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_VID: @@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, } } +static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) +{ + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + +static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + if (err) + return err; + + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { if (!priv->fs.vlan.filter_disabled) @@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) @@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) @@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ @@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) if (enable_promisc) { mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); if (!priv->fs.vlan.filter_disabled) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_add_any_vid_rules(priv); } if (enable_allmulti) mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); @@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { if (!priv->fs.vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_del_any_vid_rules(priv); mlx5e_del_l2_flow_rule(priv, &ea->promisc); } @@ -976,11 +1010,13 @@ err_destroy_flow_table: return err; } -#define MLX5E_NUM_VLAN_GROUPS 2 +#define MLX5E_NUM_VLAN_GROUPS 3 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) #define MLX5E_VLAN_GROUP1_SIZE BIT(1) +#define MLX5E_VLAN_GROUP2_SIZE BIT(0) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ - MLX5E_VLAN_GROUP1_SIZE) + MLX5E_VLAN_GROUP1_SIZE +\ + MLX5E_VLAN_GROUP2_SIZE) static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen) @@ -991,7 +1027,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); 
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP0_SIZE; @@ -1003,7 +1039,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in goto err_destroy_groups; ft->num_groups++; + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + return 0; err_destroy_groups: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index f33f72d0237c..d55fff0ba388 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, first_vid, 0xfff); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f14ca3385fdd..8ef64c4db2c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -31,6 +31,7 @@ */ #include <net/tc_act/tc_gact.h> +#include <linux/crash_dump.h> #include <net/pkt_cls.h> #include <linux/mlx5/fs.h> #include <net/vxlan.h> @@ -78,21 +79,30 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, reg_umr_sq); } -static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) +void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) { priv->params.rq_wq_type = rq_type; + priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; + priv->params.log_rq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; priv->params.mpwqe_log_stride_sz = MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? 
- MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : - MLX5_MPWRQ_LOG_STRIDE_SIZE; + MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) : + MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev); priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - priv->params.mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.log_rq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + + /* Extra room needed for build_skb */ + priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); @@ -268,6 +278,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) { + out = pstats->phy_statistical_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { out = pstats->per_prio_counters[prio]; @@ -291,11 +307,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv) &qcnt->rx_out_of_buffer); } +static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); + void *out; + u32 *in; + + if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) + return; + + in = mlx5_vzalloc(sz); + if (!in) + return; + + out = pcie_stats->pcie_perf_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + kvfree(in); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { - mlx5e_update_q_counter(priv); - mlx5e_update_vport_counters(priv); + mlx5e_update_pcie_counters(priv); mlx5e_update_pport_counters(priv); + mlx5e_update_vport_counters(priv); + mlx5e_update_q_counter(priv); mlx5e_update_sw_counters(priv); } @@ -317,6 +356,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { struct mlx5e_priv *priv = vpriv; + struct ptp_clock_event ptp_event; + struct mlx5_eqe *eqe = NULL; if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) return; @@ -326,7 +367,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, case MLX5_DEV_EVENT_PORT_DOWN: queue_work(priv->wq, &priv->update_carrier_work); break; - + case MLX5_DEV_EVENT_PPS: + eqe = (struct mlx5_eqe *)param; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_event.index = eqe->data.pps.pin; + ptp_event.timestamp = + timecounter_cyc2time(&priv->tstamp.clock, + be64_to_cpu(eqe->data.pps.time_stamp)); + mlx5e_pps_event_handler(vpriv, &ptp_event); + break; default: break; } @@ -343,9 +392,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); } -#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) -#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) - static inline int mlx5e_get_wqe_mtt_sz(void) { /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. 
@@ -372,7 +418,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq, cseg->imm = rq->mkey_be; ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; - ucseg->klm_octowords = + ucseg->xlt_octowords = cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); ucseg->bsf_octowords = cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); @@ -534,9 +580,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->buff.map_dir = DMA_FROM_DEVICE; - if (rq->xdp_prog) + if (rq->xdp_prog) { rq->buff.map_dir = DMA_BIDIRECTIONAL; + rq->rx_headroom = XDP_PACKET_HEADROOM; + } else { + rq->buff.map_dir = DMA_FROM_DEVICE; + rq->rx_headroom = MLX5_RX_HEADROOM; + } switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -586,7 +636,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, byte_count = rq->buff.wqe_sz; /* calc the required page order */ - frag_sz = MLX5_RX_HEADROOM + + frag_sz = rq->rx_headroom + byte_count /* packet data */ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); frag_sz = SKB_DATA_ALIGN(frag_sz); @@ -967,10 +1017,11 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->channel = c; sq->tc = tc; - err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf)); + err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false); if (err) return err; + sq->uar_map = sq->bfreg.map; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, @@ -979,17 +1030,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, goto err_unmap_free_uar; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - if (sq->uar.bf_map) { + if (sq->bfreg.wc) set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state); - sq->uar_map = sq->uar.bf_map; - } else { - sq->uar_map = sq->uar.map; - } + sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; - sq->min_inline_mode = - MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ? - param->min_inline_mode : 0; + sq->min_inline_mode = param->min_inline_mode; err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -1012,7 +1058,7 @@ err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &sq->uar); + mlx5_free_bfreg(mdev, &sq->bfreg); return err; } @@ -1024,7 +1070,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq) mlx5e_free_sq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); - mlx5_unmap_free_uar(priv->mdev, &sq->uar); + mlx5_free_bfreg(priv->mdev, &sq->bfreg); } static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) @@ -1053,12 +1099,15 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ? 0 : priv->tisn[sq->tc]); MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); - MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); + + if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) + MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 
0 : 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); - MLX5_SET(wq, wq, uar_page, sq->uar.index); + MLX5_SET(wq, wq, uar_page, sq->bfreg.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); @@ -1216,7 +1265,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &mdev->mlx5e_res.cq_uar; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@ -1265,7 +1313,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); - MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); @@ -1472,6 +1520,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) +{ + return is_kdump_kernel() ? + MLX5E_MIN_NUM_CHANNELS : + min_t(int, mdev->priv.eq_table.num_comp_vectors, + MLX5E_MAX_NUM_CHANNELS); +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) @@ -1677,7 +1733,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); + MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index); } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@ -1756,8 +1812,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); param->max_inline = priv->params.tx_max_inline; - /* FOR XDP SQs will support only L2 inline mode */ - param->min_inline_mode = MLX5_INLINE_MODE_NONE; + param->min_inline_mode = priv->params.tx_min_inline_mode; param->type = MLX5E_SQ_XDP; } @@ -2393,7 +2448,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &mdev->mlx5e_res.cq_uar; cq->priv = priv; @@ -2686,7 +2740,7 @@ mqprio: return mlx5e_setup_tc(dev, tc->tc); } -static struct rtnl_link_stats64 * +static void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -2729,7 +2783,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); - return stats; } static void mlx5e_set_rx_mode(struct net_device *dev) @@ -2987,11 +3040,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - if (min_tx_rate) - return -EOPNOTSUPP; - return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, - max_tx_rate); + max_tx_rate, min_tx_rate); } static int mlx5_vport_link2ifla(u8 esw_link) @@ -3159,11 +3209,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) bool reset, was_opened; int i; - if (prog && prog->xdp_adjust_head) { - netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - mutex_lock(&priv->state_lock); 
if ((netdev->features & NETIF_F_LRO) && prog) { @@ -3432,22 +3477,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; } -static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, - u8 *min_inline_mode) -{ - switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { - case MLX5_CAP_INLINE_MODE_L2: - *min_inline_mode = MLX5_INLINE_MODE_L2; - break; - case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); - break; - case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: - *min_inline_mode = MLX5_INLINE_MODE_NONE; - break; - } -} - u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -3481,7 +3510,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); - priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + priv->params.log_sq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; /* set CQE compression */ priv->params.rx_cqe_compress_def = false; @@ -3495,6 +3526,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, cqe_compress_heuristic(link_speed, pci_bw); } + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, + priv->params.rx_cqe_compress_def); + mlx5e_set_rq_priv_params(priv); if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) priv->params.lro_en = true; @@ -3507,7 +3541,11 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode); + mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); + if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE && + !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) + priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2; + priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; @@ -3517,16 +3555,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev)); - priv->params.lro_wqe_sz = - MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - - /* Extra room needed for build_skb */ - MLX5_RX_HEADROOM - - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - /* Initialize pflags */ MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def); mutex_init(&priv->state_lock); @@ -3940,6 +3971,19 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) } } +static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + int vport; + + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return; + + for (vport = 1; vport < total_vfs; vport++) + mlx5_eswitch_unregister_vport_rep(esw, vport); +} + void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3986,6 +4030,7 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv) return err; } + mlx5e_register_vport_rep(mdev); return 0; } @@ -3997,6 +4042,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void 
*vpriv) if (!netif_device_present(netdev)) return; + mlx5e_unregister_vport_rep(mdev); mlx5e_detach_netdev(mdev, netdev); mlx5e_destroy_mdev_resources(mdev); } @@ -4015,8 +4061,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) if (err) return NULL; - mlx5e_register_vport_rep(mdev); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) ppriv = &esw->offloads.vport_reps[0]; @@ -4068,13 +4112,7 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); struct mlx5e_priv *priv = vpriv; - int vport; - - for (vport = 1; vport < total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport); unregister_netdev(priv->netdev); mlx5e_detach(mdev, vpriv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 850378893b25..2c864574a9d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -374,13 +374,12 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, return -EINVAL; } -static struct rtnl_link_stats64 * +static void mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); - return stats; } static const struct switchdev_ops mlx5e_rep_switchdev_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 06d5e6fecb0a..3d371688fbbb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -30,9 +30,11 @@ * SOFTWARE. */ +#include <linux/prefetch.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> +#include <linux/bpf_trace.h> #include <net/busy_poll.h> #include "en.h" #include "en_tc.h" @@ -92,19 +94,18 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, struct mlx5e_cq *cq, u32 cqcc) { - u16 wqe_cnt_step; - cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; cq->title.op_own &= 0xf0; cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz); cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); - wqe_cnt_step = - rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? 
- mpwrq_get_cqe_consumed_strides(&cq->title) : 1; - cq->decmprs_wqe_counter = - (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1; + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + cq->decmprs_wqe_counter += + mpwrq_get_cqe_consumed_strides(&cq->title); + else + cq->decmprs_wqe_counter = + (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1; } static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, @@ -155,29 +156,26 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val) { bool was_opened; if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) return; - mutex_lock(&priv->state_lock); - if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) - goto unlock; + return; was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) mlx5e_close_locked(priv->netdev); MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val); + mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type); if (was_opened) mlx5e_open_locked(priv->netdev); -unlock: - mutex_unlock(&priv->state_lock); } #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) @@ -267,7 +265,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) if (unlikely(mlx5e_page_alloc_mapped(rq, di))) return -ENOMEM; - wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM); + wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom); return 0; } @@ -647,10 +645,9 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq) mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); } -static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, +static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, - unsigned int data_offset, - int len) + const struct xdp_buff *xdp) { struct mlx5e_sq *sq = &rq->channel->xdp_sq; struct mlx5_wq_cyc *wq = &sq->wq; @@ -661,10 +658,18 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; + u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT; - dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE; - unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE; - void *data = page_address(di->page) + data_offset; + ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; + dma_addr_t dma_addr = di->addr + data_offset; + unsigned int dma_len = xdp->data_end - xdp->data; + + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || + MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) { + rq->stats.xdp_drop++; + mlx5e_page_release(rq, di, true); + return false; + } if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) { if (sq->db.xdp.doorbell) { @@ -674,7 +679,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, } rq->stats.xdp_tx_full++; mlx5e_page_release(rq, di, true); - return; + return false; } dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, @@ -682,11 +687,17 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, memset(wqe, 0, sizeof(*wqe)); - /* copy the inline part */ - memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE); - eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + dseg = (struct mlx5_wqe_data_seg *)eseg + 1; + /* copy the inline part if required */ + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { + memcpy(eseg->inline_hdr.start, 
xdp->data, MLX5E_XDP_MIN_INLINE); + eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + dma_len -= MLX5E_XDP_MIN_INLINE; + dma_addr += MLX5E_XDP_MIN_INLINE; - dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1); + ds_cnt += MLX5E_XDP_IHS_DS_COUNT; + dseg++; + } /* write the dma part */ dseg->addr = cpu_to_be64(dma_addr); @@ -694,7 +705,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, dseg->lkey = sq->mkey_be; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); sq->db.xdp.di[pi] = *di; wi->opcode = MLX5_OPCODE_SEND; @@ -703,32 +714,39 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, sq->db.xdp.doorbell = true; rq->stats.xdp_tx++; + return true; } /* returns true if packet was consumed by xdp */ -static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, - const struct bpf_prog *prog, - struct mlx5e_dma_info *di, - void *data, u16 len) +static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, + struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) { + const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); struct xdp_buff xdp; u32 act; if (!prog) return false; - xdp.data = data; - xdp.data_end = xdp.data + len; + xdp.data = va + *rx_headroom; + xdp.data_end = xdp.data + *len; + xdp.data_hard_start = va; + act = bpf_prog_run_xdp(prog, &xdp); switch (act) { case XDP_PASS: + *rx_headroom = xdp.data - xdp.data_hard_start; + *len = xdp.data_end - xdp.data; return false; case XDP_TX: - mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len); + if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) + trace_xdp_exception(rq->netdev, prog, act); return true; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: rq->stats.xdp_drop++; mlx5e_page_release(rq, di, true); @@ -743,15 +761,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_dma_info *di; struct sk_buff *skb; void *va, *data; + u16 rx_headroom = rq->rx_headroom; bool consumed; di = &rq->dma_info[wqe_counter]; va = page_address(di->page); - data = va + MLX5_RX_HEADROOM; + data = va + rx_headroom; dma_sync_single_range_for_cpu(rq->pdev, di->addr, - MLX5_RX_HEADROOM, + rx_headroom, rq->buff.wqe_sz, DMA_FROM_DEVICE); prefetch(data); @@ -763,8 +782,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data, - cqe_bcnt); + consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt); rcu_read_unlock(); if (consumed) return NULL; /* page/packet was consumed by XDP */ @@ -780,7 +798,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, page_ref_inc(di->page); mlx5e_page_release(rq, di, true); - skb_reserve(skb, MLX5_RX_HEADROOM); + skb_reserve(skb, rx_headroom); skb_put(skb, cqe_bcnt); return skb; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 65442c36a6e1..31e3cb7ee5fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -30,6 +30,7 @@ * SOFTWARE. 
*/ +#include <linux/prefetch.h> #include <linux/ip.h> #include <linux/udp.h> #include <net/udp.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index ba5db1dd23a9..53e4992d6511 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -39,7 +39,7 @@ #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ (*(u32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ - be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) + be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) @@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = { #define PPORT_2819_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_PHY_STATISTICAL_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.phys_layer_statistical_cntrs.c##_high) +#define PPORT_PHY_STATISTICAL_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \ + counter_set.phys_layer_statistical_cntrs.c##_high) #define PPORT_PER_PRIO_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -215,6 +221,7 @@ struct mlx5e_pport_stats { __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { @@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = { { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, }; +static const struct counter_desc pport_phy_statistical_stats_desc[] = { + { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, + { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, +}; + static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, @@ -276,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +#define PCIE_PERF_OFF(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_PERF_GET(pcie_stats, c) \ + MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ + counter_set.pcie_perf_cntrs_grp_data_layout.c) + +struct mlx5e_pcie_stats { + __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; +}; + +static const struct counter_desc pcie_perf_stats_desc[] = { + { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, + { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -360,15 +387,23 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ + (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) +#define NUM_PCIE_PERF_COUNTERS(priv) \ + 
(ARRAY_SIZE(pcie_perf_stats_desc) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) -#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \ NUM_PPORT_2863_COUNTERS + \ NUM_PPORT_2819_COUNTERS + \ + NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) +#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) @@ -378,6 +413,7 @@ struct mlx5e_stats { struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; struct rtnl_link_stats64 vf_vport; + struct mlx5e_pcie_stats pcie; }; static const struct counter_desc mlx5e_pme_status_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 2ebbe80d8126..44406a5ec15d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -298,6 +298,32 @@ vxlan_match_offload_err: MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); + } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); } /* Enforce DMAC when offloading incoming tunneled flows. 
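/*
 * The IPv6 tunnel match added to parse_tunnel_attr() above programs a
 * (mask, value) byte pair per address field into the flow table: a packet
 * field matches when (field & mask) == (value & mask) for every byte of the
 * 16-byte address.  A minimal standalone sketch of that matching semantic
 * follows; the helper name and the sample /32 mask are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static int ipv6_masked_match(const uint8_t field[16],
			     const uint8_t value[16],
			     const uint8_t mask[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if ((field[i] & mask[i]) != (value[i] & mask[i]))
			return 0;
	return 1;
}

int main(void)
{
	uint8_t addr[16]  = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };
	uint8_t value[16] = { 0x20, 0x01, 0x0d, 0xb8 };
	uint8_t mask[16]  = { 0xff, 0xff, 0xff, 0xff };	/* 2001:db8::/32 */

	printf("%d\n", ipv6_masked_match(addr, value, mask));	/* 1 */
	return 0;
}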
@@ -358,12 +384,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, f->key); switch (key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: if (parse_tunnel_attr(priv, spec, f)) return -EOPNOTSUPP; break; - case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - netdev_warn(priv->netdev, - "IPv6 tunnel decap offload isn't supported\n"); default: return -EOPNOTSUPP; } @@ -460,8 +484,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, FLOW_DISSECTOR_KEY_VLAN, f->mask); if (mask->vlan_id || mask->vlan_priority) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); @@ -644,15 +668,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } -static inline int cmp_encap_info(struct mlx5_encap_info *a, - struct mlx5_encap_info *b) +static inline int cmp_encap_info(struct ip_tunnel_key *a, + struct ip_tunnel_key *b) { return memcmp(a, b, sizeof(*a)); } -static inline int hash_encap_info(struct mlx5_encap_info *info) +static inline int hash_encap_info(struct ip_tunnel_key *key) { - return jhash(info, sizeof(*info), 0); + return jhash(key, sizeof(*key), 0); } static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, @@ -660,13 +684,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi4 *fl4, struct neighbour **out_n, - __be32 *saddr, int *out_ttl) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct rtable *rt; struct neighbour *n = NULL; - int ttl; #if IS_ENABLED(CONFIG_INET) int ret; @@ -684,16 +706,54 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, else *out_dev = rt->dst.dev; - ttl = ip4_dst_hoplimit(&rt->dst); + *out_ttl = ip4_dst_hoplimit(&rt->dst); n = dst_neigh_lookup(&rt->dst, &fl4->daddr); ip_rt_put(rt); if (!n) return -ENOMEM; *out_n = n; - *saddr = fl4->saddr; - *out_ttl = ttl; + return 0; +} + +static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct net_device **out_dev, + struct flowi6 *fl6, + struct neighbour **out_n, + int *out_ttl) +{ + struct neighbour *n = NULL; + struct dst_entry *dst; + +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int ret; + + dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); + ret = dst->error; + if (ret) { + dst_release(dst); + return ret; + } + + *out_ttl = ip6_dst_hoplimit(dst); + /* if the egress device isn't on the same HW e-switch, we use the uplink */ + if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) + *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + else + *out_dev = dst->dev; +#else + return -EOPNOTSUPP; +#endif + + n = dst_neigh_lookup(dst, &fl6->daddr); + dst_release(dst); + if (!n) + return -ENOMEM; + + *out_n = n; return 0; } @@ -733,19 +793,52 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev, return encap_size; } +static int gen_vxlan_header_ipv6(struct net_device *out_dev, + char buf[], + unsigned char h_dest[ETH_ALEN], + int ttl, + struct in6_addr *daddr, + struct in6_addr *saddr, + __be16 udp_dst_port, + __be32 vx_vni) +{ + int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN; + struct ethhdr *eth = (struct ethhdr 
*)buf; + struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); + struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); + struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); + + memset(buf, 0, encap_size); + + ether_addr_copy(eth->h_dest, h_dest); + ether_addr_copy(eth->h_source, out_dev->dev_addr); + eth->h_proto = htons(ETH_P_IPV6); + + ip6_flow_hdr(ip6h, 0, 0); + /* the HW fills up ipv6 payload len */ + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + udp->dest = udp_dst_port; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(vx_vni); + + return encap_size; +} + static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5_encap_entry *e, struct net_device **out_dev) { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, ttl, err; struct neighbour *n = NULL; struct flowi4 fl4 = {}; char *encap_header; - int encap_size; - __be32 saddr; - int ttl; - int err; encap_header = kzalloc(max_encap_size, GFP_KERNEL); if (!encap_header) @@ -754,37 +847,108 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: fl4.flowi4_proto = IPPROTO_UDP; - fl4.fl4_dport = e->tun_info.tp_dst; + fl4.fl4_dport = tun_key->tp_dst; break; default: err = -EOPNOTSUPP; goto out; } - fl4.daddr = e->tun_info.daddr; + fl4.flowi4_tos = tun_key->tos; + fl4.daddr = tun_key->u.ipv4.dst; + fl4.saddr = tun_key->u.ipv4.src; err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, - &fl4, &n, &saddr, &ttl); + &fl4, &n, &ttl); if (err) goto out; + if (!(n->nud_state & NUD_VALID)) { + pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); + err = -EOPNOTSUPP; + goto out; + } + e->n = n; e->out_dev = *out_dev; + neigh_ha_snapshot(e->h_dest, n, *out_dev); + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, + e->h_dest, ttl, + fl4.daddr, + fl4.saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, + encap_size, encap_header, &e->encap_id); +out: + if (err && n) + neigh_release(n); + kfree(encap_header); + return err; +} + +static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5_encap_entry *e, + struct net_device **out_dev) + +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, err, ttl = 0; + struct neighbour *n = NULL; + struct flowi6 fl6 = {}; + char *encap_header; + + encap_header = kzalloc(max_encap_size, GFP_KERNEL); + if (!encap_header) + return -ENOMEM; + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_dport = tun_key->tp_dst; + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + fl6.daddr = tun_key->u.ipv6.dst; + fl6.saddr = tun_key->u.ipv6.src; + + err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev, + &fl6, &n, &ttl); + if (err) + goto out; + if (!(n->nud_state & NUD_VALID)) { - pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); + pr_warn("%s: 
can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr); err = -EOPNOTSUPP; goto out; } + e->n = n; + e->out_dev = *out_dev; + neigh_ha_snapshot(e->h_dest, n, *out_dev); switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: - encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, + encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header, e->h_dest, ttl, - e->tun_info.daddr, - saddr, e->tun_info.tp_dst, - e->tun_info.tun_id); + &fl6.daddr, + &fl6.saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); break; default: err = -EOPNOTSUPP; @@ -808,13 +972,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); struct ip_tunnel_key *key = &tun_info->key; - struct mlx5_encap_info info; struct mlx5_encap_entry *e; struct net_device *out_dev; + int tunnel_type, err = -EOPNOTSUPP; uintptr_t hash_key; bool found = false; - int tunnel_type; - int err; /* udp dst port must be set */ if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) @@ -830,8 +992,6 @@ vxlan_encap_offload_err: if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { - info.tp_dst = key->tp_dst; - info.tun_id = tunnel_id_to_key32(key->tun_id); tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { netdev_warn(priv->netdev, @@ -839,22 +999,11 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - switch (family) { - case AF_INET: - info.daddr = key->u.ipv4.dst; - break; - case AF_INET6: - netdev_warn(priv->netdev, - "IPv6 tunnel encap offload isn't supported\n"); - default: - return -EOPNOTSUPP; - } - - hash_key = hash_encap_info(&info); + hash_key = hash_encap_info(key); hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, encap_hlist, hash_key) { - if (!cmp_encap_info(&e->tun_info, &info)) { + if (!cmp_encap_info(&e->tun_info.key, key)) { found = true; break; } @@ -869,11 +1018,15 @@ vxlan_encap_offload_err: if (!e) return -ENOMEM; - e->tun_info = info; + e->tun_info = *tun_info; e->tunnel_type = tunnel_type; INIT_LIST_HEAD(&e->flows); - err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + if (family == AF_INET) + err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + else if (family == AF_INET6) + err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev); + if (err) goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index cfb68371c397..f193128bac4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -154,6 +154,8 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, int hlen; switch (mode) { + case MLX5_INLINE_MODE_NONE: + return 0; case MLX5_INLINE_MODE_TCP_UDP: hlen = eth_get_headlen(skb->data, skb_headlen(skb)); if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) @@ -283,21 +285,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) wi->num_bytes = num_bytes; - if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, - &skb_len); - ihs += VLAN_HLEN; - } else { - memcpy(eseg->inline_hdr_start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + if (ihs) { + if (skb_vlan_tag_present(skb)) { + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); + ihs += VLAN_HLEN; + } else { + 
memcpy(eseg->inline_hdr.start, skb_data, ihs); + mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + } + eseg->inline_hdr.sz = cpu_to_be16(ihs); + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + } else if (skb_vlan_tag_present(skb)) { + eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); + eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); } - eseg->inline_hdr_sz = cpu_to_be16(ihs); - - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start), - MLX5_SEND_WQE_DS); - dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; + dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; wi->num_dma = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 8ffcc8808e50..ea5d8d37a75c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -54,6 +54,7 @@ enum { MLX5_NUM_SPARE_EQE = 0x80, MLX5_NUM_ASYNC_EQE = 0x100, MLX5_NUM_CMD_EQE = 32, + MLX5_NUM_PF_DRAIN = 64, }; enum { @@ -153,6 +154,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_PAGE_REQUEST"; case MLX5_EVENT_TYPE_PAGE_FAULT: return "MLX5_EVENT_TYPE_PAGE_FAULT"; + case MLX5_EVENT_TYPE_PPS_EVENT: + return "MLX5_EVENT_TYPE_PPS_EVENT"; default: return "Unrecognized event"; } @@ -188,10 +191,193 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm) mb(); } -static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +static void eqe_pf_action(struct work_struct *work) +{ + struct mlx5_pagefault *pfault = container_of(work, + struct mlx5_pagefault, + work); + struct mlx5_eq *eq = pfault->eq; + + mlx5_core_page_fault(eq->dev, pfault); + mempool_free(pfault, eq->pf_ctx.pool); +} + +static void eq_pf_process(struct mlx5_eq *eq) +{ + struct mlx5_core_dev *dev = eq->dev; + struct mlx5_eqe_page_fault *pf_eqe; + struct mlx5_pagefault *pfault; + struct mlx5_eqe *eqe; + int set_ci = 0; + + while ((eqe = next_eqe_sw(eq))) { + pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC); + if (!pfault) { + schedule_work(&eq->pf_ctx.work); + break; + } + + dma_rmb(); + pf_eqe = &eqe->data.page_fault; + pfault->event_subtype = eqe->sub_type; + pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed); + + mlx5_core_dbg(dev, + "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n", + eqe->sub_type, pfault->bytes_committed); + + switch (eqe->sub_type) { + case MLX5_PFAULT_SUBTYPE_RDMA: + /* RDMA based event */ + pfault->type = + be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; + pfault->token = + be32_to_cpu(pf_eqe->rdma.pftype_token) & + MLX5_24BIT_MASK; + pfault->rdma.r_key = + be32_to_cpu(pf_eqe->rdma.r_key); + pfault->rdma.packet_size = + be16_to_cpu(pf_eqe->rdma.packet_length); + pfault->rdma.rdma_op_len = + be32_to_cpu(pf_eqe->rdma.rdma_op_len); + pfault->rdma.rdma_va = + be64_to_cpu(pf_eqe->rdma.rdma_va); + mlx5_core_dbg(dev, + "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n", + pfault->type, pfault->token, + pfault->rdma.r_key); + mlx5_core_dbg(dev, + "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", + pfault->rdma.rdma_op_len, + pfault->rdma.rdma_va); + break; + + case MLX5_PFAULT_SUBTYPE_WQE: + /* WQE based event */ + pfault->type = + be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24; + pfault->token = + be32_to_cpu(pf_eqe->wqe.token); + pfault->wqe.wq_num = + be32_to_cpu(pf_eqe->wqe.pftype_wq) & + MLX5_24BIT_MASK; + pfault->wqe.wqe_index = + be16_to_cpu(pf_eqe->wqe.wqe_index); + 
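/*
 * The page-fault decoding above packs an 8-bit fault type and a 24-bit
 * token (or WQ number) into one 32-bit word; MLX5_24BIT_MASK keeps the low
 * 24 bits after the big-endian load.  A minimal standalone sketch of that
 * unpacking, operating on the value already converted to CPU byte order
 * (names below are illustrative, not driver definitions):
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_24BIT_MASK 0xffffff

static void unpack_pftype_token(uint32_t pftype_token,
				uint8_t *type, uint32_t *token)
{
	*type  = pftype_token >> 24;			/* top 8 bits  */
	*token = pftype_token & SKETCH_24BIT_MASK;	/* low 24 bits */
}

int main(void)
{
	uint8_t type;
	uint32_t token;

	unpack_pftype_token(0x05abcdefu, &type, &token);
	/* prints: type 0x5 token 0xabcdef */
	printf("type 0x%x token 0x%06x\n", type, (unsigned int)token);
	return 0;
}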
pfault->wqe.packet_size = + be16_to_cpu(pf_eqe->wqe.packet_length); + mlx5_core_dbg(dev, + "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n", + pfault->type, pfault->token, + pfault->wqe.wq_num, + pfault->wqe.wqe_index); + break; + + default: + mlx5_core_warn(dev, + "Unsupported page fault event sub-type: 0x%02hhx\n", + eqe->sub_type); + /* Unsupported page faults should still be + * resolved by the page fault handler + */ + } + + pfault->eq = eq; + INIT_WORK(&pfault->work, eqe_pf_action); + queue_work(eq->pf_ctx.wq, &pfault->work); + + ++eq->cons_index; + ++set_ci; + + if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); +} + +static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + unsigned long flags; + + if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) { + eq_pf_process(eq); + spin_unlock_irqrestore(&eq->pf_ctx.lock, flags); + } else { + schedule_work(&eq->pf_ctx.work); + } + + return IRQ_HANDLED; +} + +/* mempool_refill() was proposed but unfortunately wasn't accepted + * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html + * Chip workaround. + */ +static void mempool_refill(mempool_t *pool) +{ + while (pool->curr_nr < pool->min_nr) + mempool_free(mempool_alloc(pool, GFP_KERNEL), pool); +} + +static void eq_pf_action(struct work_struct *work) +{ + struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work); + + mempool_refill(eq->pf_ctx.pool); + + spin_lock_irq(&eq->pf_ctx.lock); + eq_pf_process(eq); + spin_unlock_irq(&eq->pf_ctx.lock); +} + +static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name) +{ + spin_lock_init(&pf_ctx->lock); + INIT_WORK(&pf_ctx->work, eq_pf_action); + + pf_ctx->wq = alloc_ordered_workqueue(name, + WQ_MEM_RECLAIM); + if (!pf_ctx->wq) + return -ENOMEM; + + pf_ctx->pool = mempool_create_kmalloc_pool + (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault)); + if (!pf_ctx->pool) + goto err_wq; + + return 0; +err_wq: + destroy_workqueue(pf_ctx->wq); + return -ENOMEM; +} + +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, + u32 wq_num, u8 type, int error) +{ + u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; + + MLX5_SET(page_fault_resume_in, in, opcode, + MLX5_CMD_OP_PAGE_FAULT_RESUME); + MLX5_SET(page_fault_resume_in, in, error, !!error); + MLX5_SET(page_fault_resume_in, in, page_fault_type, type); + MLX5_SET(page_fault_resume_in, in, wq_number, wq_num); + MLX5_SET(page_fault_resume_in, in, token, token); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} +EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); +#endif + +static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) { + struct mlx5_eq *eq = eq_ptr; + struct mlx5_core_dev *dev = eq->dev; struct mlx5_eqe *eqe; - int eqes_found = 0; int set_ci = 0; u32 cqn = -1; u32 rsn; @@ -276,12 +462,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } break; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - case MLX5_EVENT_TYPE_PAGE_FAULT: - mlx5_eq_pagefault(dev, eqe); - break; -#endif - #ifdef CONFIG_MLX5_CORE_EN case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); @@ -292,6 +472,10 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) mlx5_port_module_event(dev, eqe); break; + case MLX5_EVENT_TYPE_PPS_EVENT: + if (dev->event) + dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe); + break; default: 
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -299,7 +483,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } ++eq->cons_index; - eqes_found = 1; ++set_ci; /* The HCA will think the queue has overflowed if we @@ -319,17 +502,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (cqn != -1) tasklet_schedule(&eq->tasklet_ctx.task); - return eqes_found; -} - -static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) -{ - struct mlx5_eq *eq = eq_ptr; - struct mlx5_core_dev *dev = eq->dev; - - mlx5_eq_int(dev, eq); - - /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } @@ -345,22 +517,32 @@ static void init_eq_buf(struct mlx5_eq *eq) } int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, struct mlx5_uar *uar) + int nent, u64 mask, const char *name, + enum mlx5_eq_type type) { u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; + irq_handler_t handler; __be64 *pas; void *eqc; int inlen; u32 *in; int err; + eq->type = type; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); if (err) return err; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (type == MLX5_EQ_TYPE_PF) + handler = mlx5_eq_pf_int; + else +#endif + handler = mlx5_eq_int; + init_eq_buf(eq); inlen = MLX5_ST_SZ_BYTES(create_eq_in) + @@ -380,7 +562,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); - MLX5_SET(eqc, eqc, uar_page, uar->index); + MLX5_SET(eqc, eqc, uar_page, priv->uar->index); MLX5_SET(eqc, eqc, intr, vecidx); MLX5_SET(eqc, eqc, log_page_size, eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); @@ -395,8 +577,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->irqn = priv->msix_arr[vecidx].vector; eq->dev = dev; - eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = request_irq(eq->irqn, mlx5_msix_handler, 0, + eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET; + err = request_irq(eq->irqn, handler, 0, priv->irq_info[vecidx].name, eq); if (err) goto err_eq; @@ -405,11 +587,20 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, if (err) goto err_irq; - INIT_LIST_HEAD(&eq->tasklet_ctx.list); - INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); - spin_lock_init(&eq->tasklet_ctx.lock); - tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, - (unsigned long)&eq->tasklet_ctx); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (type == MLX5_EQ_TYPE_PF) { + err = init_pf_ctx(&eq->pf_ctx, name); + if (err) + goto err_irq; + } else +#endif + { + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, + (unsigned long)&eq->tasklet_ctx); + } /* EQs are created in ARMED state */ @@ -444,7 +635,16 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); synchronize_irq(eq->irqn); - tasklet_disable(&eq->tasklet_ctx.task); + + if (eq->type == MLX5_EQ_TYPE_COMP) { + tasklet_disable(&eq->tasklet_ctx.task); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + } else if (eq->type == 
MLX5_EQ_TYPE_PF) { + cancel_work_sync(&eq->pf_ctx.work); + destroy_workqueue(eq->pf_ctx.wq); + mempool_destroy(eq->pf_ctx.pool); +#endif + } mlx5_buf_free(dev, &eq->buf); return err; @@ -479,8 +679,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (MLX5_CAP_GEN(dev, pg)) - async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && MLX5_CAP_GEN(dev, vport_group_manager) && @@ -492,9 +690,12 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) else mlx5_core_dbg(dev, "port_module_event is not set\n"); + if (MLX5_CAP_GEN(dev, pps)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, - "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); + "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); return err; @@ -504,7 +705,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, MLX5_NUM_ASYNC_EQE, async_event_mask, - "mlx5_async_eq", &dev->priv.uuari.uars[0]); + "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create async EQ %d\n", err); goto err1; @@ -514,13 +715,33 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) MLX5_EQ_VEC_PAGES, /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", - &dev->priv.uuari.uars[0]); + MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); goto err2; } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_create_map_eq(dev, &table->pfault_eq, + MLX5_EQ_VEC_PFAULT, + MLX5_NUM_ASYNC_EQE, + 1 << MLX5_EVENT_TYPE_PAGE_FAULT, + "mlx5_page_fault_eq", + MLX5_EQ_TYPE_PF); + if (err) { + mlx5_core_warn(dev, "failed to create page fault EQ %d\n", + err); + goto err3; + } + } + return err; +err3: + mlx5_destroy_unmap_eq(dev, &table->pages_eq); +#else + return err; +#endif err2: mlx5_destroy_unmap_eq(dev, &table->async_eq); @@ -536,6 +757,14 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = &dev->priv.eq_table; int err; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); + if (err) + return err; + } +#endif + err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d0c8bf014453..fcd5bc7e31db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); @@ -1098,7 +1098,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 
match_criteria); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); @@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); @@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } if (vport->info.vlan || vport->info.qos) - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); if (vport->info.spoofchk) { MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); @@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Allowed vlan rule */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); @@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) } static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, - u32 initial_max_rate) + u32 initial_max_rate, u32 initial_bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, initial_max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) } static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, - u32 max_rate) + u32 max_rate, u32 bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share); bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, 
int vport_num, esw_apply_vport_conf(esw, vport); /* Attach vport to the eswitch rate limiter */ - if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate)) + if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate, + vport->qos.bw_share)) esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); /* Sync with current vport context */ @@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; + ivi->min_tx_rate = evport->info.min_rate; ivi->max_tx_rate = evport->info.max_rate; mutex_unlock(&esw->state_lock); @@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, return 0; } -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32 max_rate) +static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) { + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); struct mlx5_vport *evport; + u32 max_guarantee = 0; + int i; + + for (i = 0; i <= esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled || evport->info.min_rate < max_guarantee) + continue; + max_guarantee = evport->info.min_rate; + } + + return max_t(u32, max_guarantee / fw_max_bw_share, 1); +} + +static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_vport *evport; + u32 vport_max_rate; + u32 vport_min_rate; + u32 bw_share; + int err; + int i; + + for (i = 0; i <= esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled) + continue; + vport_min_rate = evport->info.min_rate; + vport_max_rate = evport->info.max_rate; + bw_share = MLX5_MIN_BW_SHARE; + + if (vport_min_rate) + bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, + divider, + fw_max_bw_share); + + if (bw_share == evport->qos.bw_share) + continue; + + err = esw_vport_qos_config(esw, i, vport_max_rate, + bw_share); + if (!err) + evport->qos.bw_share = bw_share; + else + return err; + } + + return 0; +} + +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + fw_max_bw_share >= MLX5_MIN_BW_SHARE; + bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + struct mlx5_vport *evport; + u32 previous_min_rate; + u32 divider; int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) + return -EOPNOTSUPP; mutex_lock(&esw->state_lock); evport = &esw->vports[vport]; - err = esw_vport_qos_config(esw, vport, max_rate); + + if (min_rate == evport->info.min_rate) + goto set_max_rate; + + previous_min_rate = evport->info.min_rate; + evport->info.min_rate = min_rate; + divider = calculate_vports_min_rate_divider(esw); + err = normalize_vports_min_rate(esw, divider); + if (err) { + evport->info.min_rate = previous_min_rate; + goto unlock; + } + +set_max_rate: + if (max_rate == evport->info.max_rate) + goto unlock; + + err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share); if (!err) evport->info.max_rate = max_rate; +unlock: mutex_unlock(&esw->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 
8661dd3f542c..5b78883d5654 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -36,6 +36,7 @@ #include <linux/if_ether.h> #include <linux/if_link.h> #include <net/devlink.h> +#include <net/ip_tunnels.h> #include <linux/mlx5/device.h> #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -49,6 +50,11 @@ #define FDB_UPLINK_VPORT 0xffff +#define MLX5_MIN_BW_SHARE 1 + +#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ + min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) + /* L2 -mac address based- hash helpers */ struct l2addr_node { struct hlist_node hlist; @@ -115,6 +121,7 @@ struct mlx5_vport_info { u8 qos; u64 node_guid; int link_state; + u32 min_rate; u32 max_rate; bool spoofchk; bool trusted; @@ -137,6 +144,7 @@ struct mlx5_vport { struct { bool enabled; u32 esw_tsar_ix; + u32 bw_share; } qos; bool enabled; @@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, int vport, bool spoofchk); int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport_num, bool setting); -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32 max_rate); +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, @@ -274,18 +282,12 @@ enum { #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80 -struct mlx5_encap_info { - __be32 daddr; - __be32 tun_id; - __be16 tp_dst; -}; - struct mlx5_encap_entry { struct hlist_node encap_hlist; struct list_head flows; u32 encap_id; struct neighbour *n; - struct mlx5_encap_info tun_info; + struct ip_tunnel_info tun_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 595f7c7383b3..4f5b0d47d5f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -402,19 +402,18 @@ out: } #define MAX_PF_SQ 256 -#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */ #define ESW_OFFLOADS_NUM_GROUPS 4 static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int table_size, ix, esw_size, err = 0; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; struct mlx5_flow_group *g; u32 *flow_group_in; void *match_criteria; - int table_size, ix, err = 0; u32 flags = 0; flow_group_in = mlx5_vzalloc(inlen); @@ -428,15 +427,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) goto ns_err; } - esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), + MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS); + + esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, + 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; fdb = 
mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, - ESW_OFFLOADS_NUM_ENTRIES, + esw_size, ESW_OFFLOADS_NUM_GROUPS, 0, flags); if (IS_ERR(fdb)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index b53fc85a2375..b64a781c7e85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev, int err; u32 *in; - if (size > MLX5_CAP_ESW(dev, max_encap_header_size)) + if (size > max_encap_size) { + mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", + size, max_encap_size); return -EINVAL; + } - in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size, + in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6346a8f5883b..2478516a61e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1232,10 +1232,18 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, fs_for_each_fte(fte, fg) { nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); if (compare_match_value(&fg->mask, match_value, &fte->val) && - (flow_act->action & fte->action) && - flow_act->flow_tag == fte->flow_tag) { + (flow_act->action & fte->action)) { int old_action = fte->action; + if (fte->flow_tag != flow_act->flow_tag) { + mlx5_core_warn(get_dev(&fte->node), + "FTE flow tag %u already exists with different flow tag %u\n", + fte->flow_tag, + flow_act->flow_tag); + handle = ERR_PTR(-EEXIST); + goto unlock_fte; + } + fte->action |= flow_act->action; handle = add_rule_fte(fte, fg, dest, dest_num, old_action != flow_act->action); @@ -1665,7 +1673,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, #define FLOW_TABLE_BIT_SZ 1 #define GET_FLOW_TABLE_CAP(dev, offset) \ - ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ + ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \ offset / 32)) >> \ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 5718aada6605..d0bbefa08af7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -91,6 +91,20 @@ out: } EXPORT_SYMBOL(mlx5_core_query_vendor_id); +static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_pcam_reg(dev, dev->caps.pcam, + MLX5_PCAM_FEATURE_ENHANCED_FEATURES, + MLX5_PCAM_REGS_5000_TO_507F); +} + +static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_mcam_reg(dev, dev->caps.mcam, + MLX5_MCAM_FEATURE_ENHANCED_FEATURES, + MLX5_MCAM_REGS_FIRST_128); +} + int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { int err; @@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, pcam_reg)) + mlx5_get_pcam_reg(dev); + + if (MLX5_CAP_GEN(dev, mcam_reg)) + mlx5_get_mcam_reg(dev); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 5bcf93422ee0..d0515391d33b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd) } } -static u16 get_maj(u32 fw) -{ - return fw >> 28; -} - -static u16 get_min(u32 fw) -{ - return fw >> 16 & 0xfff; -} - -static u16 get_sub(u32 fw) -{ - return fw & 0xffff; -} - static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev) dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); - fw = ioread32be(&h->fw_ver); - sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw)); + sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str); dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index)); dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); + fw = ioread32be(&h->fw_ver); + dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw); } static unsigned long get_next_poll_jiffies(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3c315eb8d270..c4242a4e8130 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -152,6 +152,26 @@ static struct mlx5_profile profile[] = { .size = 8, .limit = 4 }, + .mr_cache[16] = { + .size = 8, + .limit = 4 + }, + .mr_cache[17] = { + .size = 8, + .limit = 4 + }, + .mr_cache[18] = { + .size = 8, + .limit = 4 + }, + .mr_cache[19] = { + .size = 4, + .limit = 2 + }, + .mr_cache[20] = { + .size = 4, + .limit = 2 + }, }, }; @@ -398,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: - memcpy(dev->hca_caps_max[cap_type], hca_caps, + memcpy(dev->caps.hca_max[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: - memcpy(dev->hca_caps_cur[cap_type], hca_caps, + memcpy(dev->caps.hca_cur[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: @@ -493,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], + memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL], MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", @@ -517,8 +537,18 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); + /* If the HCA supports 4K UARs use it */ + if (MLX5_CAP_GEN_MAX(dev, uar_4k)) + MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); + MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); + if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte)) + MLX5_SET(cmd_hca_cap, + set_hca_cap, + cache_line_128byte, + cache_line_size() == 128 ? 
1 : 0); + err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); @@ -739,7 +769,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, - name, &dev->priv.uuari.uars[0]); + name, MLX5_EQ_TYPE_COMP); if (err) { kfree(eq); goto clean; @@ -899,8 +929,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto out; } - MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); - err = mlx5_init_cq_table(dev); if (err) { dev_err(&pdev->dev, "failed to initialize cq table\n"); @@ -1079,8 +1107,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_cleanup_once; } - err = mlx5_alloc_uuars(dev, &priv->uuari); - if (err) { + dev->priv.uar = mlx5_get_uars_page(dev); + if (!dev->priv.uar) { dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); goto err_disable_msix; } @@ -1088,7 +1116,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, err = mlx5_start_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); - goto err_free_uar; + goto err_put_uars; } err = alloc_comp_eqs(dev); @@ -1154,8 +1182,8 @@ err_affinity_hints: err_stop_eqs: mlx5_stop_eqs(dev); -err_free_uar: - mlx5_free_uuars(dev, &priv->uuari); +err_put_uars: + mlx5_put_uars_page(dev, priv->uar); err_disable_msix: mlx5_disable_msix(dev); @@ -1218,7 +1246,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); - mlx5_free_uuars(dev, &priv->uuari); + mlx5_put_uars_page(dev, priv->uar); mlx5_disable_msix(dev); if (cleanup) mlx5_cleanup_once(dev); @@ -1284,10 +1312,24 @@ static int init_one(struct pci_dev *pdev, spin_lock_init(&priv->ctx_lock); mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + err = init_srcu_struct(&priv->pfault_srcu); + if (err) { + dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n", + err); + goto clean_dev; + } +#endif + mutex_init(&priv->bfregs.reg_head.lock); + mutex_init(&priv->bfregs.wc_head.lock); + INIT_LIST_HEAD(&priv->bfregs.reg_head.list); + INIT_LIST_HEAD(&priv->bfregs.wc_head.list); + err = mlx5_pci_init(dev, priv); if (err) { dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err); - goto clean_dev; + goto clean_srcu; } err = mlx5_health_init(dev); @@ -1304,9 +1346,7 @@ static int init_one(struct pci_dev *pdev, goto clean_health; } - err = request_module_nowait(MLX5_IB_MOD); - if (err) - pr_info("failed request module on %s\n", MLX5_IB_MOD); + request_module_nowait(MLX5_IB_MOD); err = devlink_register(devlink, &pdev->dev); if (err) @@ -1321,7 +1361,11 @@ clean_health: mlx5_health_cleanup(dev); close_pci: mlx5_pci_close(dev, priv); +clean_srcu: +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + cleanup_srcu_struct(&priv->pfault_srcu); clean_dev: +#endif pci_set_drvdata(pdev, NULL); devlink_free(devlink); @@ -1346,6 +1390,9 @@ static void remove_one(struct pci_dev *pdev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_pci_close(dev, priv); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + cleanup_srcu_struct(&priv->pfault_srcu); +#endif pci_set_drvdata(pdev, NULL); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index d4a99c9757cb..b3dabe6e8836 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -86,6 +86,8 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); +void mlx5_core_page_fault(struct mlx5_core_dev *dev, + struct mlx5_pagefault *pfault); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); @@ -111,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); void mlx5_cq_tasklet_cb(unsigned long data); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group); +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group, + u8 access_reg_group); + void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove(struct mlx5_core_dev *dev); @@ -136,6 +143,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); +int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); +int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); + void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index fd12e0a377a5..141583daf5a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -74,6 +74,30 @@ out: } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(pcam_reg); + + MLX5_SET(pcam_reg, in, feature_group, feature_group); + MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0); +} + +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group, + u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(mcam_reg); + + MLX5_SET(mcam_reg, in, feature_group, feature_group); + MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0); +} + struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; @@ -866,3 +890,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) module_num, mlx5_pme_status[module_status - 1], mlx5_pme_error[error_type]); } + +int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, + mtpps_size, MLX5_REG_MTPPS, 0, 0); +} + +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out, + sizeof(out), MLX5_REG_MTPPS, 0, 1); +} + +int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode) +{ + u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = 
{0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + int err = 0; + + MLX5_SET(mtppse_reg, in, pin, pin); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 0); + if (err) + return err; + + *arm = MLX5_GET(mtppse_reg, in, event_arm); + *mode = MLX5_GET(mtppse_reg, in, event_generation_mode); + + return err; +} + +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode) +{ + u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + + MLX5_SET(mtppse_reg, in, pin, pin); + MLX5_SET(mtppse_reg, in, event_arm, arm); + MLX5_SET(mtppse_reg, in, event_generation_mode, mode); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 1); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index d0a4005fe63a..cbbcef2884be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -143,95 +143,6 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) mlx5_core_put_rsc(common); } -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) -{ - struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault; - int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; - struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); - struct mlx5_core_qp *qp = - container_of(common, struct mlx5_core_qp, common); - struct mlx5_pagefault pfault; - - if (!qp) { - mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n", - qpn); - return; - } - - pfault.event_subtype = eqe->sub_type; - pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) & - (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA); - pfault.bytes_committed = be32_to_cpu( - pf_eqe->bytes_committed); - - mlx5_core_dbg(dev, - "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n", - eqe->sub_type, pfault.flags); - - switch (eqe->sub_type) { - case MLX5_PFAULT_SUBTYPE_RDMA: - /* RDMA based event */ - pfault.rdma.r_key = - be32_to_cpu(pf_eqe->rdma.r_key); - pfault.rdma.packet_size = - be16_to_cpu(pf_eqe->rdma.packet_length); - pfault.rdma.rdma_op_len = - be32_to_cpu(pf_eqe->rdma.rdma_op_len); - pfault.rdma.rdma_va = - be64_to_cpu(pf_eqe->rdma.rdma_va); - mlx5_core_dbg(dev, - "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n", - qpn, pfault.rdma.r_key); - mlx5_core_dbg(dev, - "PAGE_FAULT: rdma_op_len: 0x%08x,\n", - pfault.rdma.rdma_op_len); - mlx5_core_dbg(dev, - "PAGE_FAULT: rdma_va: 0x%016llx,\n", - pfault.rdma.rdma_va); - mlx5_core_dbg(dev, - "PAGE_FAULT: bytes_committed: 0x%06x\n", - pfault.bytes_committed); - break; - - case MLX5_PFAULT_SUBTYPE_WQE: - /* WQE based event */ - pfault.wqe.wqe_index = - be16_to_cpu(pf_eqe->wqe.wqe_index); - pfault.wqe.packet_size = - be16_to_cpu(pf_eqe->wqe.packet_length); - mlx5_core_dbg(dev, - "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n", - qpn, pfault.wqe.wqe_index); - mlx5_core_dbg(dev, - "PAGE_FAULT: bytes_committed: 0x%06x\n", - pfault.bytes_committed); - break; - - default: - mlx5_core_warn(dev, - "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n", - eqe->sub_type, qpn); - /* Unsupported page faults should still be resolved by the - * page fault handler - */ - } - - if (qp->pfault_handler) { - qp->pfault_handler(qp, &pfault); - } else { - mlx5_core_err(dev, - "ODP event for QP %08x, without a fault handler in QP\n", - qpn); - /* Page fault will remain unresolved. 
QP will hang until it is - * destroyed - */ - } - - mlx5_core_put_rsc(common); -} -#endif - static int create_qprqsq_common(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int rsc_type) @@ -506,31 +417,6 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, - u8 flags, int error) -{ - u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; - - MLX5_SET(page_fault_resume_in, in, opcode, - MLX5_CMD_OP_PAGE_FAULT_RESUME); - MLX5_SET(page_fault_resume_in, in, qpn, qpn); - - if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR) - MLX5_SET(page_fault_resume_in, in, req_res, 1); - if (flags & MLX5_PAGE_FAULT_RESUME_WRITE) - MLX5_SET(page_fault_resume_in, in, read_write, 1); - if (flags & MLX5_PAGE_FAULT_RESUME_RDMA) - MLX5_SET(page_fault_resume_in, in, rdma, 1); - if (error) - MLX5_SET(page_fault_resume_in, in, error, 1); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} -EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); -#endif - int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index ab0b896621a0..2e6b0f290ddc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -37,11 +37,6 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" -enum { - NUM_DRIVER_UARS = 4, - NUM_LOW_LAT_UUARS = 4, -}; - int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) { u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0}; @@ -67,167 +62,269 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) } EXPORT_SYMBOL(mlx5_cmd_free_uar); -static int need_uuar_lock(int uuarn) +static int uars_per_sys_page(struct mlx5_core_dev *mdev) { - int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; - - if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) - return 0; + if (MLX5_CAP_GEN(mdev, uar_4k)) + return MLX5_CAP_GEN(mdev, num_of_uars_per_page); return 1; } -int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index) { - int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; - struct mlx5_bf *bf; - phys_addr_t addr; - int err; + u32 system_page_index; + + if (MLX5_CAP_GEN(mdev, uar_4k)) + system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT); + else + system_page_index = index; + + return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index; +} + +static void up_rel_func(struct kref *kref) +{ + struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count); + + list_del(&up->list); + if (mlx5_cmd_free_uar(up->mdev, up->index)) + mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); + kfree(up->reg_bitmap); + kfree(up->fp_bitmap); + kfree(up); +} + +static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev, + bool map_wc) +{ + struct mlx5_uars_page *up; + int err = -ENOMEM; + phys_addr_t pfn; + int bfregs; int i; - uuari->num_uars = NUM_DRIVER_UARS; - uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; + bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR; + up = kzalloc(sizeof(*up), GFP_KERNEL); + if (!up) + return ERR_PTR(err); - mutex_init(&uuari->lock); - uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), 
GFP_KERNEL); - if (!uuari->uars) - return -ENOMEM; + up->mdev = mdev; + up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL); + if (!up->reg_bitmap) + goto error1; - uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); - if (!uuari->bfs) { - err = -ENOMEM; - goto out_uars; - } + up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL); + if (!up->fp_bitmap) + goto error1; - uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), - GFP_KERNEL); - if (!uuari->bitmap) { - err = -ENOMEM; - goto out_bfs; - } + for (i = 0; i < bfregs; i++) + if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR) + set_bit(i, up->reg_bitmap); + else + set_bit(i, up->fp_bitmap); - uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); - if (!uuari->count) { - err = -ENOMEM; - goto out_bitmap; - } + up->bfregs = bfregs; + up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR; + up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR; - for (i = 0; i < uuari->num_uars; i++) { - err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); - if (err) - goto out_count; + err = mlx5_cmd_alloc_uar(mdev, &up->index); + if (err) { + mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); + goto error1; + } - addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); - uuari->uars[i].map = ioremap(addr, PAGE_SIZE); - if (!uuari->uars[i].map) { - mlx5_cmd_free_uar(dev, uuari->uars[i].index); + pfn = uar2pfn(mdev, up->index); + if (map_wc) { + up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!up->map) { + err = -EAGAIN; + goto error2; + } + } else { + up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!up->map) { err = -ENOMEM; - goto out_count; + goto error2; } - mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", - uuari->uars[i].index, uuari->uars[i].map); - } - - for (i = 0; i < tot_uuars; i++) { - bf = &uuari->bfs[i]; - - bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; - bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; - bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; - bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * - (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + - MLX5_BF_OFFSET; - bf->need_lock = need_uuar_lock(i); - spin_lock_init(&bf->lock); - spin_lock_init(&bf->lock32); - bf->uuarn = i; } + kref_init(&up->ref_count); + mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n", + up->index, up->bfregs); + return up; + +error2: + if (mlx5_cmd_free_uar(mdev, up->index)) + mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index); +error1: + kfree(up->fp_bitmap); + kfree(up->reg_bitmap); + kfree(up); + return ERR_PTR(err); +} - return 0; - -out_count: - for (i--; i >= 0; i--) { - iounmap(uuari->uars[i].map); - mlx5_cmd_free_uar(dev, uuari->uars[i].index); +struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev) +{ + struct mlx5_uars_page *ret; + + mutex_lock(&mdev->priv.bfregs.reg_head.lock); + if (list_empty(&mdev->priv.bfregs.reg_head.list)) { + ret = alloc_uars_page(mdev, false); + if (IS_ERR(ret)) { + ret = NULL; + goto out; + } + list_add(&ret->list, &mdev->priv.bfregs.reg_head.list); + } else { + ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, + struct mlx5_uars_page, list); + kref_get(&ret->ref_count); } - kfree(uuari->count); +out: + mutex_unlock(&mdev->priv.bfregs.reg_head.lock); -out_bitmap: - kfree(uuari->bitmap); - -out_bfs: - 
kfree(uuari->bfs); + return ret; +} +EXPORT_SYMBOL(mlx5_get_uars_page); -out_uars: - kfree(uuari->uars); - return err; +void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up) +{ + mutex_lock(&mdev->priv.bfregs.reg_head.lock); + kref_put(&up->ref_count, up_rel_func); + mutex_unlock(&mdev->priv.bfregs.reg_head.lock); } +EXPORT_SYMBOL(mlx5_put_uars_page); -int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi) { - int i = uuari->num_uars; + /* return the offset in bytes from the start of the page to the + * blue flame area of the UAR + */ + return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE + + (dbi % MLX5_BFREGS_PER_UAR) * + (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET; +} - for (i--; i >= 0; i--) { - iounmap(uuari->uars[i].map); - mlx5_cmd_free_uar(dev, uuari->uars[i].index); +static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path) +{ + struct mlx5_bfreg_data *bfregs; + struct mlx5_uars_page *up; + struct list_head *head; + unsigned long *bitmap; + unsigned int *avail; + struct mutex *lock; /* pointer to right mutex */ + int dbi; + + bfregs = &mdev->priv.bfregs; + if (map_wc) { + head = &bfregs->wc_head.list; + lock = &bfregs->wc_head.lock; + } else { + head = &bfregs->reg_head.list; + lock = &bfregs->reg_head.lock; } - - kfree(uuari->count); - kfree(uuari->bitmap); - kfree(uuari->bfs); - kfree(uuari->uars); + mutex_lock(lock); + if (list_empty(head)) { + up = alloc_uars_page(mdev, map_wc); + if (IS_ERR(up)) { + mutex_unlock(lock); + return PTR_ERR(up); + } + list_add(&up->list, head); + } else { + up = list_entry(head->next, struct mlx5_uars_page, list); + kref_get(&up->ref_count); + } + if (fast_path) { + bitmap = up->fp_bitmap; + avail = &up->fp_avail; + } else { + bitmap = up->reg_bitmap; + avail = &up->reg_avail; + } + dbi = find_first_bit(bitmap, up->bfregs); + clear_bit(dbi, bitmap); + (*avail)--; + if (!(*avail)) + list_del(&up->list); + + bfreg->map = up->map + map_offset(mdev, dbi); + bfreg->up = up; + bfreg->wc = map_wc; + bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR; + mutex_unlock(lock); return 0; } -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, - bool map_wc) +int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path) { - phys_addr_t pfn; - phys_addr_t uar_bar_start; int err; - err = mlx5_cmd_alloc_uar(mdev, &uar->index); - if (err) { - mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); - return err; - } + err = alloc_bfreg(mdev, bfreg, map_wc, fast_path); + if (!err) + return 0; - uar_bar_start = pci_resource_start(mdev->pdev, 0); - pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; + if (err == -EAGAIN && map_wc) + return alloc_bfreg(mdev, bfreg, false, fast_path); - if (map_wc) { - uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->bf_map) { - mlx5_core_warn(mdev, "ioremap_wc() failed\n"); - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) - goto err_free_uar; - } - } else { - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) - goto err_free_uar; - } + return err; +} +EXPORT_SYMBOL(mlx5_alloc_bfreg); - return 0; +static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev, + struct mlx5_uars_page *up, + struct mlx5_sq_bfreg *bfreg) +{ + unsigned int uar_idx; + unsigned int bfreg_idx; + unsigned int bf_reg_size; -err_free_uar: - 
mlx5_core_warn(mdev, "ioremap() failed\n"); - err = -ENOMEM; - mlx5_cmd_free_uar(mdev, uar->index); + bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size); - return err; + uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT; + bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size; + + return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx; } -EXPORT_SYMBOL(mlx5_alloc_map_uar); -void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg) { - if (uar->map) - iounmap(uar->map); - else - iounmap(uar->bf_map); - mlx5_cmd_free_uar(mdev, uar->index); + struct mlx5_bfreg_data *bfregs; + struct mlx5_uars_page *up; + struct mutex *lock; /* pointer to right mutex */ + unsigned int dbi; + bool fp; + unsigned int *avail; + unsigned long *bitmap; + struct list_head *head; + + bfregs = &mdev->priv.bfregs; + if (bfreg->wc) { + head = &bfregs->wc_head.list; + lock = &bfregs->wc_head.lock; + } else { + head = &bfregs->reg_head.list; + lock = &bfregs->reg_head.lock; + } + up = bfreg->up; + dbi = addr_to_dbi_in_syspage(mdev, up, bfreg); + fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR; + if (fp) { + avail = &up->fp_avail; + bitmap = up->fp_bitmap; + } else { + avail = &up->reg_avail; + bitmap = up->reg_bitmap; + } + mutex_lock(lock); + (*avail)++; + set_bit(dbi, bitmap); + if (*avail == 1) + list_add_tail(&up->list, head); + + kref_put(&up->ref_count, up_rel_func); + mutex_unlock(lock); } -EXPORT_SYMBOL(mlx5_unmap_free_uar); +EXPORT_SYMBOL(mlx5_free_bfreg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 7129c30a2ab4..15c2294dd2b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); +void mlx5_query_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_L2: + *min_inline_mode = MLX5_INLINE_MODE_L2; + break; + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); + break; + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = MLX5_INLINE_MODE_NONE; + break; + } +} +EXPORT_SYMBOL_GPL(mlx5_query_min_inline); + int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 16f44b9aa076..ef23eaedc2ff 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -73,6 +73,8 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q + depends on PSAMPLE || PSAMPLE=n + select PARMAN default m ---help--- This driver supports Mellanox Technologies Spectrum Ethernet diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index fe8dadba15ab..6b6c30deee83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o -mlxsw_core-objs := core.o +mlxsw_core-objs := core.o core_acl_flex_keys.o \ + core_acl_flex_actions.o 
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o @@ -13,7 +14,8 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum_kvdl.o + spectrum_kvdl.o spectrum_acl_tcam.o \ + spectrum_acl.o spectrum_flower.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 56e19b0d2f8f..a1b48421648a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -1132,12 +1132,12 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1); -/* cmd_mbox_sw2hw_eq_int_oi +/* cmd_mbox_sw2hw_eq_oi * When set, overrun ignore is enabled. */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); -/* cmd_mbox_sw2hw_eq_int_st +/* cmd_mbox_sw2hw_eq_st * Event delivery state machine * 0x0 - FIRED * 0x1 - ARMED (Request for Notification) @@ -1146,19 +1146,19 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2); -/* cmd_mbox_sw2hw_eq_int_log_eq_size +/* cmd_mbox_sw2hw_eq_log_eq_size * Log (base 2) of the EQ size (in entries). */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4); -/* cmd_mbox_sw2hw_eq_int_producer_counter +/* cmd_mbox_sw2hw_eq_producer_counter * Producer Counter. The counter is incremented for each EQE that is written * by the HW to the EQ. * Maintained by HW (valid for the QUERY_EQ command only) */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16); -/* cmd_mbox_sw2hw_eq_int_pa +/* cmd_mbox_sw2hw_eq_pa * Physical Address. 
*/ MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 57a98849551b..a4c07841aaf6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1901,11 +1901,11 @@ int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) } EXPORT_SYMBOL(mlxsw_core_schedule_dw); -int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay) +bool mlxsw_core_schedule_work(struct work_struct *work) { - return queue_delayed_work(mlxsw_owq, dwork, delay); + return queue_work(mlxsw_owq, work); } -EXPORT_SYMBOL(mlxsw_core_schedule_odw); +EXPORT_SYMBOL(mlxsw_core_schedule_work); void mlxsw_core_flush_owq(void) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index a7f94fbc898b..cf38cf9027f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -207,7 +207,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, u8 local_port); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); -int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay); +bool mlxsw_core_schedule_work(struct work_struct *work); void mlxsw_core_flush_owq(void); #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c new file mode 100644 index 000000000000..5f337715a4da --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -0,0 +1,679 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/rhashtable.h> +#include <linux/list.h> + +#include "item.h" +#include "core_acl_flex_actions.h" + +enum mlxsw_afa_set_type { + MLXSW_AFA_SET_TYPE_NEXT, + MLXSW_AFA_SET_TYPE_GOTO, +}; + +/* afa_set_type + * Type of the record at the end of the action set. + */ +MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4); + +/* afa_set_next_action_set_ptr + * A pointer to the next action set in the KVD Centralized database. + */ +MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24); + +/* afa_set_goto_g + * group - When set, the binding is of an ACL group. When cleared, + * the binding is of an ACL. + * Must be set to 1 for Spectrum. + */ +MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1); + +enum mlxsw_afa_set_goto_binding_cmd { + /* continue go the next binding point */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, + /* jump to the next binding point no return */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, + /* terminate the acl binding */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4, +}; + +/* afa_set_goto_binding_cmd */ +MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3); + +/* afa_set_goto_next_binding + * ACL/ACL group identifier. If the g bit is set, this field should hold + * the acl_group_id, else it should hold the acl_id. + */ +MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16); + +/* afa_all_action_type + * Action Type. + */ +MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6); + +struct mlxsw_afa { + unsigned int max_acts_per_set; + const struct mlxsw_afa_ops *ops; + void *ops_priv; + struct rhashtable set_ht; + struct rhashtable fwd_entry_ht; +}; + +#define MLXSW_AFA_SET_LEN 0xA8 + +struct mlxsw_afa_set_ht_key { + char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */ + bool is_first; +}; + +/* Set structure holds one action set record. It contains up to three + * actions (depends on size of particular actions). The set is either + * put directly to a rule, or it is stored in KVD linear area. + * To prevent duplicate entries in KVD linear area, a hashtable is + * used to track sets that were previously inserted and may be shared. + */ + +struct mlxsw_afa_set { + struct rhash_head ht_node; + struct mlxsw_afa_set_ht_key ht_key; + u32 kvdl_index; + bool shared; /* Inserted in hashtable (doesn't mean that + * kvdl_index is valid). + */ + unsigned int ref_count; + struct mlxsw_afa_set *next; /* Pointer to the next set. */ + struct mlxsw_afa_set *prev; /* Pointer to the previous set, + * note that set may have multiple + * sets from multiple blocks + * pointing at it. This is only + * usable until commit. 
+ */ +}; + +static const struct rhashtable_params mlxsw_afa_set_ht_params = { + .key_len = sizeof(struct mlxsw_afa_set_ht_key), + .key_offset = offsetof(struct mlxsw_afa_set, ht_key), + .head_offset = offsetof(struct mlxsw_afa_set, ht_node), + .automatic_shrinking = true, +}; + +struct mlxsw_afa_fwd_entry_ht_key { + u8 local_port; +}; + +struct mlxsw_afa_fwd_entry { + struct rhash_head ht_node; + struct mlxsw_afa_fwd_entry_ht_key ht_key; + u32 kvdl_index; + unsigned int ref_count; +}; + +static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = { + .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key), + .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key), + .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node), + .automatic_shrinking = true, +}; + +struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, + const struct mlxsw_afa_ops *ops, + void *ops_priv) +{ + struct mlxsw_afa *mlxsw_afa; + int err; + + mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL); + if (!mlxsw_afa) + return ERR_PTR(-ENOMEM); + err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params); + if (err) + goto err_set_rhashtable_init; + err = rhashtable_init(&mlxsw_afa->fwd_entry_ht, + &mlxsw_afa_fwd_entry_ht_params); + if (err) + goto err_fwd_entry_rhashtable_init; + mlxsw_afa->max_acts_per_set = max_acts_per_set; + mlxsw_afa->ops = ops; + mlxsw_afa->ops_priv = ops_priv; + return mlxsw_afa; + +err_fwd_entry_rhashtable_init: + rhashtable_destroy(&mlxsw_afa->set_ht); +err_set_rhashtable_init: + kfree(mlxsw_afa); + return ERR_PTR(err); +} +EXPORT_SYMBOL(mlxsw_afa_create); + +void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa) +{ + rhashtable_destroy(&mlxsw_afa->fwd_entry_ht); + rhashtable_destroy(&mlxsw_afa->set_ht); + kfree(mlxsw_afa); +} +EXPORT_SYMBOL(mlxsw_afa_destroy); + +static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set, + enum mlxsw_afa_set_goto_binding_cmd cmd, + u16 group_id) +{ + char *actions = set->ht_key.enc_actions; + + mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO); + mlxsw_afa_set_goto_g_set(actions, true); + mlxsw_afa_set_goto_binding_cmd_set(actions, cmd); + mlxsw_afa_set_goto_next_binding_set(actions, group_id); +} + +static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set, + u32 next_set_kvdl_index) +{ + char *actions = set->ht_key.enc_actions; + + mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT); + mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index); +} + +static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first) +{ + struct mlxsw_afa_set *set; + + set = kzalloc(sizeof(*set), GFP_KERNEL); + if (!set) + return NULL; + /* Need to initialize the set to pass by default */ + mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); + set->ht_key.is_first = is_first; + set->ref_count = 1; + return set; +} + +static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set) +{ + kfree(set); +} + +static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + int err; + + err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + if (err) + return err; + err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv, + &set->kvdl_index, + set->ht_key.enc_actions, + set->ht_key.is_first); + if (err) + goto err_kvdl_set_add; + set->shared = true; + set->prev = NULL; + return 0; + +err_kvdl_set_add: + rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + return err; +} + +static void mlxsw_afa_set_unshare(struct 
mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv, + set->kvdl_index, + set->ht_key.is_first); + rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + set->shared = false; +} + +static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + if (--set->ref_count) + return; + if (set->shared) + mlxsw_afa_set_unshare(mlxsw_afa, set); + mlxsw_afa_set_destroy(set); +} + +static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *orig_set) +{ + struct mlxsw_afa_set *set; + int err; + + /* There is a hashtable of sets maintained. If a set with the exact + * same encoding exists, we reuse it. Otherwise, the current set + * is shared by making it available to others using the hash table. + */ + set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key, + mlxsw_afa_set_ht_params); + if (set) { + set->ref_count++; + mlxsw_afa_set_put(mlxsw_afa, orig_set); + } else { + set = orig_set; + err = mlxsw_afa_set_share(mlxsw_afa, set); + if (err) + return ERR_PTR(err); + } + return set; +} + +/* Block structure holds a list of action sets. One action block + * represents one chain of actions executed upon match of a rule. + */ + +struct mlxsw_afa_block { + struct mlxsw_afa *afa; + bool finished; + struct mlxsw_afa_set *first_set; + struct mlxsw_afa_set *cur_set; + unsigned int cur_act_index; /* In current set. */ + struct list_head fwd_entry_ref_list; +}; + +struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) +{ + struct mlxsw_afa_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + INIT_LIST_HEAD(&block->fwd_entry_ref_list); + block->afa = mlxsw_afa; + + /* At least one action set is always present, so just create it here */ + block->first_set = mlxsw_afa_set_create(true); + if (!block->first_set) + goto err_first_set_create; + block->cur_set = block->first_set; + return block; + +err_first_set_create: + kfree(block); + return NULL; +} +EXPORT_SYMBOL(mlxsw_afa_block_create); + +static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block); + +void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_set *set = block->first_set; + struct mlxsw_afa_set *next_set; + + do { + next_set = set->next; + mlxsw_afa_set_put(block->afa, set); + set = next_set; + } while (set); + mlxsw_afa_fwd_entry_refs_destroy(block); + kfree(block); +} +EXPORT_SYMBOL(mlxsw_afa_block_destroy); + +int mlxsw_afa_block_commit(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_set *set = block->cur_set; + struct mlxsw_afa_set *prev_set; + + block->cur_set = NULL; + block->finished = true; + + /* Go over all linked sets starting from last + * and try to find existing set in the hash table. + * In case it is not there, assign a KVD linear index + * and insert it. + */ + do { + prev_set = set->prev; + set = mlxsw_afa_set_get(block->afa, set); + if (IS_ERR(set)) + /* No rollback is needed since the chain is + * in consistent state and mlxsw_afa_block_destroy + * will take care of putting it away. 
+ */ + return PTR_ERR(set); + if (prev_set) { + prev_set->next = set; + mlxsw_afa_set_next_set(prev_set, set->kvdl_index); + set = prev_set; + } + } while (prev_set); + + block->first_set = set; + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_commit); + +char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) +{ + return block->first_set->ht_key.enc_actions; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_set); + +u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) +{ + return block->first_set->kvdl_index; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); + +void mlxsw_afa_block_continue(struct mlxsw_afa_block *block) +{ + if (WARN_ON(block->finished)) + return; + mlxsw_afa_set_goto_set(block->cur_set, + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0); + block->finished = true; +} +EXPORT_SYMBOL(mlxsw_afa_block_continue); + +void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) +{ + if (WARN_ON(block->finished)) + return; + mlxsw_afa_set_goto_set(block->cur_set, + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id); + block->finished = true; +} +EXPORT_SYMBOL(mlxsw_afa_block_jump); + +static struct mlxsw_afa_fwd_entry * +mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) +{ + struct mlxsw_afa_fwd_entry *fwd_entry; + int err; + + fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL); + if (!fwd_entry) + return ERR_PTR(-ENOMEM); + fwd_entry->ht_key.local_port = local_port; + fwd_entry->ref_count = 1; + + err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht, + &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); + if (err) + goto err_rhashtable_insert; + + err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv, + &fwd_entry->kvdl_index, + local_port); + if (err) + goto err_kvdl_fwd_entry_add; + return fwd_entry; + +err_kvdl_fwd_entry_add: + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); +err_rhashtable_insert: + kfree(fwd_entry); + return ERR_PTR(err); +} + +static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_fwd_entry *fwd_entry) +{ + mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv, + fwd_entry->kvdl_index); + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); + kfree(fwd_entry); +} + +static struct mlxsw_afa_fwd_entry * +mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port) +{ + struct mlxsw_afa_fwd_entry_ht_key ht_key = {0}; + struct mlxsw_afa_fwd_entry *fwd_entry; + + ht_key.local_port = local_port; + fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key, + mlxsw_afa_fwd_entry_ht_params); + if (fwd_entry) { + fwd_entry->ref_count++; + return fwd_entry; + } + return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port); +} + +static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_fwd_entry *fwd_entry) +{ + if (--fwd_entry->ref_count) + return; + mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry); +} + +struct mlxsw_afa_fwd_entry_ref { + struct list_head list; + struct mlxsw_afa_fwd_entry *fwd_entry; +}; + +static struct mlxsw_afa_fwd_entry_ref * +mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + struct mlxsw_afa_fwd_entry *fwd_entry; + int err; + + fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL); + if (!fwd_entry_ref) + return ERR_PTR(-ENOMEM); + fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port); + if (IS_ERR(fwd_entry)) { + err = 
PTR_ERR(fwd_entry); + goto err_fwd_entry_get; + } + fwd_entry_ref->fwd_entry = fwd_entry; + list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list); + return fwd_entry_ref; + +err_fwd_entry_get: + kfree(fwd_entry_ref); + return ERR_PTR(err); +} + +static void +mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) +{ + list_del(&fwd_entry_ref->list); + mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); + kfree(fwd_entry_ref); +} + +static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + struct mlxsw_afa_fwd_entry_ref *tmp; + + list_for_each_entry_safe(fwd_entry_ref, tmp, + &block->fwd_entry_ref_list, list) + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); +} + +#define MLXSW_AFA_ONE_ACTION_LEN 32 +#define MLXSW_AFA_PAYLOAD_OFFSET 4 + +static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, + u8 action_code, u8 action_size) +{ + char *oneact; + char *actions; + + if (WARN_ON(block->finished)) + return NULL; + if (block->cur_act_index + action_size > + block->afa->max_acts_per_set) { + struct mlxsw_afa_set *set; + + /* The appended action won't fit into the current action set, + * so create a new set. + */ + set = mlxsw_afa_set_create(false); + if (!set) + return NULL; + set->prev = block->cur_set; + block->cur_act_index = 0; + block->cur_set->next = set; + block->cur_set = set; + } + + actions = block->cur_set->ht_key.enc_actions; + oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN; + block->cur_act_index += action_size; + mlxsw_afa_all_action_type_set(oneact, action_code); + return oneact + MLXSW_AFA_PAYLOAD_OFFSET; +} + +/* Trap / Discard Action + * --------------------- + * The Trap / Discard action enables trapping / mirroring packets to the CPU + * as well as discarding packets. + * The ACL Trap / Discard separates the forward/discard control from CPU + * trap control. In addition, the Trap / Discard action enables activating + * SPAN (port mirroring). + */ + +#define MLXSW_AFA_TRAPDISC_CODE 0x03 +#define MLXSW_AFA_TRAPDISC_SIZE 1 + +enum mlxsw_afa_trapdisc_forward_action { + MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3, +}; + +/* afa_trapdisc_forward_action + * Forward Action. + */ +MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4); + +static inline void +mlxsw_afa_trapdisc_pack(char *payload, + enum mlxsw_afa_trapdisc_forward_action forward_action) +{ + mlxsw_afa_trapdisc_forward_action_set(payload, forward_action); +} + +int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_TRAPDISC_CODE, + MLXSW_AFA_TRAPDISC_SIZE); + + if (!act) + return -ENOBUFS; + mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_drop); + +/* Forwarding Action + * ----------------- + * Forwarding Action can be used to implement Policy Based Switching (PBS) + * as well as OpenFlow related "Output" action. + */ + +#define MLXSW_AFA_FORWARD_CODE 0x07 +#define MLXSW_AFA_FORWARD_SIZE 1 + +enum mlxsw_afa_forward_type { + /* PBS, Policy Based Switching */ + MLXSW_AFA_FORWARD_TYPE_PBS, + /* Output, OpenFlow output type */ + MLXSW_AFA_FORWARD_TYPE_OUTPUT, +}; + +/* afa_forward_type */ +MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2); + +/* afa_forward_pbs_ptr + * A pointer to the PBS entry configured by PPBS register. + * Reserved when in_port is set. 
+ */ +MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24); + +/* afa_forward_in_port + * Packet is forwarded back to the ingress port. + */ +MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1); + +static inline void +mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, + u32 pbs_ptr, bool in_port) +{ + mlxsw_afa_forward_type_set(payload, type); + mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr); + mlxsw_afa_forward_in_port_set(payload, in_port); +} + +int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, + u8 local_port, bool in_port) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + u32 kvdl_index; + char *act; + int err; + + if (in_port) + return -EOPNOTSUPP; + fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port); + if (IS_ERR(fwd_entry_ref)) + return PTR_ERR(fwd_entry_ref); + kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; + + act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, + MLXSW_AFA_FORWARD_SIZE); + if (!act) { + err = -ENOBUFS; + goto err_append_action; + } + mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS, + kvdl_index, in_port); + return 0; + +err_append_action: + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h new file mode 100644 index 000000000000..43f78dcfe394 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -0,0 +1,66 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H +#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H + +#include <linux/types.h> + +struct mlxsw_afa; +struct mlxsw_afa_block; + +struct mlxsw_afa_ops { + int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first); + void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); + int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); + void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); +}; + +struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, + const struct mlxsw_afa_ops *ops, + void *ops_priv); +void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa); +struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); +void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); +int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); +char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); +u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); +void mlxsw_afa_block_continue(struct mlxsw_afa_block *block); +void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); +int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); +int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, + u8 local_port, bool in_port); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c new file mode 100644 index 000000000000..b32a00972e83 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -0,0 +1,475 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/errno.h> + +#include "item.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_afk { + struct list_head key_info_list; + unsigned int max_blocks; + const struct mlxsw_afk_block *blocks; + unsigned int blocks_count; +}; + +static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) +{ + int i; + int j; + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; + + for (j = 0; j < block->instances_count; j++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[j]; + if (elinst->type != elinst->info->type || + elinst->item.size.bits != + elinst->info->item.size.bits) + return false; + } + } + return true; +} + +struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, + const struct mlxsw_afk_block *blocks, + unsigned int blocks_count) +{ + struct mlxsw_afk *mlxsw_afk; + + mlxsw_afk = kzalloc(sizeof(*mlxsw_afk), GFP_KERNEL); + if (!mlxsw_afk) + return NULL; + INIT_LIST_HEAD(&mlxsw_afk->key_info_list); + mlxsw_afk->max_blocks = max_blocks; + mlxsw_afk->blocks = blocks; + mlxsw_afk->blocks_count = blocks_count; + WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk)); + return mlxsw_afk; +} +EXPORT_SYMBOL(mlxsw_afk_create); + +void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk) +{ + WARN_ON(!list_empty(&mlxsw_afk->key_info_list)); + kfree(mlxsw_afk); +} +EXPORT_SYMBOL(mlxsw_afk_destroy); + +struct mlxsw_afk_key_info { + struct list_head list; + unsigned int ref_count; + unsigned int blocks_count; + int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value + * is index inside "blocks" + */ + struct mlxsw_afk_element_usage elusage; + const struct mlxsw_afk_block *blocks[0]; +}; + +static bool +mlxsw_afk_key_info_elements_eq(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + return memcmp(&key_info->elusage, elusage, sizeof(*elusage)) == 0; +} + +static struct mlxsw_afk_key_info * +mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + + list_for_each_entry(key_info, &mlxsw_afk->key_info_list, list) { + if (mlxsw_afk_key_info_elements_eq(key_info, elusage)) + return key_info; + } + return NULL; +} + +struct mlxsw_afk_picker { + struct { + DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); + unsigned int total; + } hits[0]; +}; + +static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + enum mlxsw_afk_element element) +{ + int i; + int j; + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; + + for (j = 0; j < block->instances_count; j++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[j]; + if (elinst->info->element == element) { + __set_bit(element, picker->hits[i].element); + picker->hits[i].total++; + } + } + } +} + +static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + int block_index) +{ + DECLARE_BITMAP(hits_element, MLXSW_AFK_ELEMENT_MAX); + int i; + int j; + + memcpy(&hits_element, &picker->hits[block_index].element, + sizeof(hits_element)); + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) { + if (__test_and_clear_bit(j, picker->hits[i].element)) + picker->hits[i].total--; + } + } +} + +static int mlxsw_afk_picker_most_hits_get(struct 
mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker) +{ + int most_index = -EINVAL; /* Should never happen to return this */ + int most_hits = 0; + int i; + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + if (picker->hits[i].total > most_hits) { + most_hits = picker->hits[i].total; + most_index = i; + } + } + return most_index; +} + +static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + int block_index, + struct mlxsw_afk_key_info *key_info) +{ + enum mlxsw_afk_element element; + + if (key_info->blocks_count == mlxsw_afk->max_blocks) + return -EINVAL; + + for_each_set_bit(element, picker->hits[block_index].element, + MLXSW_AFK_ELEMENT_MAX) { + key_info->element_to_block[element] = key_info->blocks_count; + mlxsw_afk_element_usage_add(&key_info->elusage, element); + } + + key_info->blocks[key_info->blocks_count] = + &mlxsw_afk->blocks[block_index]; + key_info->blocks_count++; + return 0; +} + +static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_picker *picker; + enum mlxsw_afk_element element; + size_t alloc_size; + int err; + + alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count; + picker = kzalloc(alloc_size, GFP_KERNEL); + if (!picker) + return -ENOMEM; + + /* Since the same elements could be present in multiple blocks, + * we must find out optimal block list in order to make the + * block count as low as possible. + * + * First, we count hits. We go over all available blocks and count + * how many of requested elements are covered by each. + * + * Then in loop, we find block with most hits and add it to + * output key_info. Then we have to subtract this block hits so + * the next iteration will find most suitable block for + * the rest of requested elements. 
+ */ + + mlxsw_afk_element_usage_for_each(element, elusage) + mlxsw_afk_picker_count_hits(mlxsw_afk, picker, element); + + do { + int block_index; + + block_index = mlxsw_afk_picker_most_hits_get(mlxsw_afk, picker); + if (block_index < 0) { + err = block_index; + goto out; + } + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, + block_index, key_info); + if (err) + goto out; + mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index); + } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage)); + + err = 0; +out: + kfree(picker); + return err; +} + +static struct mlxsw_afk_key_info * +mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + size_t alloc_size; + int err; + + alloc_size = sizeof(*key_info) + + sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks; + key_info = kzalloc(alloc_size, GFP_KERNEL); + if (!key_info) + return ERR_PTR(-ENOMEM); + err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage); + if (err) + goto err_picker; + list_add(&key_info->list, &mlxsw_afk->key_info_list); + key_info->ref_count = 1; + return key_info; + +err_picker: + kfree(key_info); + return ERR_PTR(err); +} + +static void mlxsw_afk_key_info_destroy(struct mlxsw_afk_key_info *key_info) +{ + list_del(&key_info->list); + kfree(key_info); +} + +struct mlxsw_afk_key_info * +mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + + key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage); + if (key_info) { + key_info->ref_count++; + return key_info; + } + return mlxsw_afk_key_info_create(mlxsw_afk, elusage); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_get); + +void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info) +{ + if (--key_info->ref_count) + return; + mlxsw_afk_key_info_destroy(key_info); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_put); + +bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + return mlxsw_afk_element_usage_subset(elusage, &key_info->elusage); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_subset); + +static const struct mlxsw_afk_element_inst * +mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block, + enum mlxsw_afk_element element) +{ + int i; + + for (i = 0; i < block->instances_count; i++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[i]; + if (elinst->info->element == element) + return elinst; + } + return NULL; +} + +static const struct mlxsw_afk_element_inst * +mlxsw_afk_key_info_elinst_get(struct mlxsw_afk_key_info *key_info, + enum mlxsw_afk_element element, + int *p_block_index) +{ + const struct mlxsw_afk_element_inst *elinst; + const struct mlxsw_afk_block *block; + int block_index; + + if (WARN_ON(!test_bit(element, key_info->elusage.usage))) + return NULL; + block_index = key_info->element_to_block[element]; + block = key_info->blocks[block_index]; + + elinst = mlxsw_afk_block_elinst_get(block, element); + if (WARN_ON(!elinst)) + return NULL; + + *p_block_index = block_index; + return elinst; +} + +u16 +mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info, + int block_index) +{ + return key_info->blocks[block_index]->encoding; +} +EXPORT_SYMBOL(mlxsw_afk_key_info_block_encoding_get); + +unsigned int +mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info) +{ + return key_info->blocks_count; +} +EXPORT_SYMBOL(mlxsw_afk_key_info_blocks_count_get); + +void 
mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value) +{ + const struct mlxsw_afk_element_info *elinfo = + &mlxsw_afk_element_infos[element]; + const struct mlxsw_item *storage_item = &elinfo->item; + + if (!mask_value) + return; + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_U32)) + return; + __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value); + __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value); + mlxsw_afk_element_usage_add(&values->elusage, element); +} +EXPORT_SYMBOL(mlxsw_afk_values_add_u32); + +void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + const char *key_value, const char *mask_value, + unsigned int len) +{ + const struct mlxsw_afk_element_info *elinfo = + &mlxsw_afk_element_infos[element]; + const struct mlxsw_item *storage_item = &elinfo->item; + + if (!memchr_inv(mask_value, 0, len)) /* If mask is zero */ + return; + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_BUF) || + WARN_ON(elinfo->item.size.bytes != len)) + return; + __mlxsw_item_memcpy_to(values->storage.key, key_value, + storage_item, 0); + __mlxsw_item_memcpy_to(values->storage.mask, mask_value, + storage_item, 0); + mlxsw_afk_element_usage_add(&values->elusage, element); +} +EXPORT_SYMBOL(mlxsw_afk_values_add_buf); + +static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + u32 value; + + value = __mlxsw_item_get32(storage, storage_item, 0); + __mlxsw_item_set32(output_indexed, output_item, 0, value); +} + +static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + char *storage_data = __mlxsw_item_data(storage, storage_item, 0); + char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); + size_t len = output_item->size.bytes; + + memcpy(output_data, storage_data, len); +} + +#define MLXSW_AFK_KEY_BLOCK_SIZE 16 + +static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + int block_index, char *storage, char *output) +{ + char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE; + const struct mlxsw_item *storage_item = &elinst->info->item; + const struct mlxsw_item *output_item = &elinst->item; + + if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) + mlxsw_afk_encode_u32(storage_item, output_item, + storage, output_indexed); + else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) + mlxsw_afk_encode_buf(storage_item, output_item, + storage, output_indexed); +} + +void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_values *values, + char *key, char *mask) +{ + const struct mlxsw_afk_element_inst *elinst; + enum mlxsw_afk_element element; + int block_index; + + mlxsw_afk_element_usage_for_each(element, &values->elusage) { + elinst = mlxsw_afk_key_info_elinst_get(key_info, element, + &block_index); + if (!elinst) + continue; + mlxsw_afk_encode_one(elinst, block_index, + values->storage.key, key); + mlxsw_afk_encode_one(elinst, block_index, + values->storage.mask, mask); + } +} +EXPORT_SYMBOL(mlxsw_afk_encode); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h new file mode 100644 index 000000000000..e4fcba7c2af2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ 
-0,0 +1,238 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H +#define _MLXSW_CORE_ACL_FLEX_KEYS_H + +#include <linux/types.h> +#include <linux/bitmap.h> + +#include "item.h" + +enum mlxsw_afk_element { + MLXSW_AFK_ELEMENT_SRC_SYS_PORT, + MLXSW_AFK_ELEMENT_DMAC, + MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP4, + MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_SRC_IP6_HI, + MLXSW_AFK_ELEMENT_SRC_IP6_LO, + MLXSW_AFK_ELEMENT_DST_IP6_HI, + MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, + MLXSW_AFK_ELEMENT_MAX, +}; + +enum mlxsw_afk_element_type { + MLXSW_AFK_ELEMENT_TYPE_U32, + MLXSW_AFK_ELEMENT_TYPE_BUF, +}; + +struct mlxsw_afk_element_info { + enum mlxsw_afk_element element; /* element ID */ + enum mlxsw_afk_element_type type; + struct mlxsw_item item; /* element geometry in internal storage */ +}; + +#define MLXSW_AFK_ELEMENT_INFO(_type, _element, _offset, _shift, _size) \ + [MLXSW_AFK_ELEMENT_##_element] = { \ + .element = MLXSW_AFK_ELEMENT_##_element, \ + .type = _type, \ + .item = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _size}, \ + .name = #_element, \ + }, \ + } + +#define MLXSW_AFK_ELEMENT_INFO_U32(_element, _offset, _shift, _size) \ + MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_U32, \ + _element, _offset, _shift, _size) + +#define MLXSW_AFK_ELEMENT_INFO_BUF(_element, _offset, _size) \ + MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \ + _element, _offset, 0, _size) + +/* For the purpose of the driver, define a internal storage scratchpad + * that will be used to store key/mask values. 
For each defined element type + * define an internal storage geometry. + */ +static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { + MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), + MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), + MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), +}; + +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 + +struct mlxsw_afk_element_inst { /* element instance in actual block */ + const struct mlxsw_afk_element_info *info; + enum mlxsw_afk_element_type type; + struct mlxsw_item item; /* element geometry in block */ +}; + +#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, _shift, _size) \ + { \ + .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \ + .type = _type, \ + .item = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _size}, \ + .name = #_element, \ + }, \ + } + +#define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size) \ + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \ + _element, _offset, _shift, _size) + +#define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size) \ + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF, \ + _element, _offset, 0, _size) + +struct mlxsw_afk_block { + u16 encoding; /* block ID */ + struct mlxsw_afk_element_inst *instances; + unsigned int instances_count; +}; + +#define MLXSW_AFK_BLOCK(_encoding, _instances) \ + { \ + .encoding = _encoding, \ + .instances = _instances, \ + .instances_count = ARRAY_SIZE(_instances), \ + } + +struct mlxsw_afk_element_usage { + DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX); +}; + +#define mlxsw_afk_element_usage_for_each(element, elusage) \ + for_each_set_bit(element, (elusage)->usage, MLXSW_AFK_ELEMENT_MAX) + +static inline void +mlxsw_afk_element_usage_add(struct mlxsw_afk_element_usage *elusage, + enum mlxsw_afk_element element) +{ + __set_bit(element, elusage->usage); +} + +static inline void +mlxsw_afk_element_usage_zero(struct mlxsw_afk_element_usage *elusage) +{ + bitmap_zero(elusage->usage, MLXSW_AFK_ELEMENT_MAX); +} + +static inline void +mlxsw_afk_element_usage_fill(struct mlxsw_afk_element_usage *elusage, + const enum mlxsw_afk_element *elements, + unsigned int elements_count) +{ + int i; + + mlxsw_afk_element_usage_zero(elusage); + for (i = 0; i < elements_count; i++) + mlxsw_afk_element_usage_add(elusage, elements[i]); +} + +static inline bool +mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small, + struct mlxsw_afk_element_usage *elusage_big) +{ + int i; + + for (i = 0; i < MLXSW_AFK_ELEMENT_MAX; i++) + if (test_bit(i, elusage_small->usage) && + !test_bit(i, elusage_big->usage)) + return false; + return true; +} + +struct mlxsw_afk; + +struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, + const struct mlxsw_afk_block *blocks, + unsigned int blocks_count); +void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk); + +struct mlxsw_afk_key_info; + +struct mlxsw_afk_key_info * +mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage); 
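As a hedged illustration of how the flex-key API declared above is meant to be consumed (the Spectrum ACL code is the intended user), the sketch below builds a key/mask pair for two match elements. It is not part of the patch: the function name, the chosen elements and the literal values are hypothetical, it assumes the usual kernel headers plus this core_acl_flex_keys.h, and only functions declared in this header are called.

/* Illustrative sketch only -- not part of the patch. */
static int example_encode_rule(struct mlxsw_afk *afk, char *key, char *mask)
{
	/* Elements this hypothetical rule matches on. */
	const enum mlxsw_afk_element elements[] = {
		MLXSW_AFK_ELEMENT_ETHERTYPE,
		MLXSW_AFK_ELEMENT_SRC_IP4,
	};
	struct mlxsw_afk_element_values values = {};
	struct mlxsw_afk_element_usage elusage;
	struct mlxsw_afk_key_info *key_info;

	/* Declare the set of elements the rule uses. */
	mlxsw_afk_element_usage_fill(&elusage, elements, ARRAY_SIZE(elements));

	/* Find or create a block layout (key_info) covering those elements. */
	key_info = mlxsw_afk_key_info_get(afk, &elusage);
	if (IS_ERR(key_info))
		return PTR_ERR(key_info);

	/* Stage key/mask values in the internal storage scratchpad... */
	mlxsw_afk_values_add_u32(&values, MLXSW_AFK_ELEMENT_ETHERTYPE,
				 ETH_P_IP, 0xffff);
	mlxsw_afk_values_add_u32(&values, MLXSW_AFK_ELEMENT_SRC_IP4,
				 0xc0a80001, 0xffffffff); /* 192.168.0.1/32 */

	/* ...then encode them into the per-block hardware key/mask buffers. */
	mlxsw_afk_encode(key_info, &values, key, mask);

	mlxsw_afk_key_info_put(key_info);
	return 0;
}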
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info); +bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage); + +u16 +mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info, + int block_index); +unsigned int +mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info); + +struct mlxsw_afk_element_values { + struct mlxsw_afk_element_usage elusage; + struct { + char key[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; + char mask[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; + } storage; +}; + +void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value); +void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + const char *key_value, const char *mask_value, + unsigned int len); +void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_values *values, + char *key, char *mask); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index e50c8db2602a..12c3a4449120 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -338,7 +338,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num, return -EIO; } - return err > 0 ? 0 : err; + return 0; } /* Routine executes I2C command. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 3c95e3ddd9c2..28427f0758c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/item.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * * Redistribution and use in source and binary forms, with or without @@ -72,6 +72,40 @@ __mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index, typesize); } +static inline u8 __mlxsw_item_get8(const char *buf, + const struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8)); + u8 *b = (u8 *) buf; + u8 tmp; + + tmp = b[offset]; + tmp >>= item->shift; + tmp &= GENMASK(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item, + unsigned short index, u8 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, + sizeof(u8)); + u8 *b = (u8 *) buf; + u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift; + u8 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = b[offset]; + tmp &= ~mask; + tmp |= val; + b[offset] = tmp; +} + static inline u16 __mlxsw_item_get16(const char *buf, const struct mlxsw_item *item, unsigned short index) @@ -191,6 +225,14 @@ static inline void __mlxsw_item_memcpy_to(char *buf, const char *src, memcpy(&buf[offset], src, item->size.bytes); } +static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); + + return &buf[offset]; +} + static inline u16 __mlxsw_item_bit_array_offset(const struct mlxsw_item *item, u16 index, u8 *shift) @@ -253,6 +295,47 @@ static inline void __mlxsw_item_bit_array_set(char *buf, * _iname: item name within the container */ +#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +{ \ + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\ +{ \ + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ + _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ +{ \ + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u8 val) \ +{ \ + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + #define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .offset = _offset, \ @@ -393,6 +476,11 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline char * \ 
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \ +{ \ + return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } #define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \ @@ -419,6 +507,12 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), index); \ +} \ +static inline char * \ +mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_data(buf, \ + &__ITEM_NAME(_type, _cname, _iname), index); \ } #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1357fe04391b..0899e2d310e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1,9 +1,9 @@ /* * drivers/net/ethernet/mellanox/mlxsw/reg.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> - * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> * * Redistribution and use in source and binary forms, with or without @@ -1757,6 +1757,505 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* PPBT - Policy-Engine Port Binding Table + * --------------------------------------- + * This register is used for configuration of the Port Binding Table. + */ +#define MLXSW_REG_PPBT_ID 0x3002 +#define MLXSW_REG_PPBT_LEN 0x14 + +MLXSW_REG_DEFINE(ppbt, MLXSW_REG_PPBT_ID, MLXSW_REG_PPBT_LEN); + +enum mlxsw_reg_pxbt_e { + MLXSW_REG_PXBT_E_IACL, + MLXSW_REG_PXBT_E_EACL, +}; + +/* reg_ppbt_e + * Access: Index + */ +MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1); + +enum mlxsw_reg_pxbt_op { + MLXSW_REG_PXBT_OP_BIND, + MLXSW_REG_PXBT_OP_UNBIND, +}; + +/* reg_ppbt_op + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3); + +/* reg_ppbt_local_port + * Local port. Not including CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8); + +/* reg_ppbt_g + * group - When set, the binding is of an ACL group. When cleared, + * the binding is of an ACL. + * Must be set to 1 for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, g, 0x10, 31, 1); + +/* reg_ppbt_acl_info + * ACL/ACL group identifier. If the g bit is set, this field should hold + * the acl_group_id, else it should hold the acl_id. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16); + +static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e, + enum mlxsw_reg_pxbt_op op, + u8 local_port, u16 acl_info) +{ + MLXSW_REG_ZERO(ppbt, payload); + mlxsw_reg_ppbt_e_set(payload, e); + mlxsw_reg_ppbt_op_set(payload, op); + mlxsw_reg_ppbt_local_port_set(payload, local_port); + mlxsw_reg_ppbt_g_set(payload, true); + mlxsw_reg_ppbt_acl_info_set(payload, acl_info); +} + +/* PACL - Policy-Engine ACL Register + * --------------------------------- + * This register is used for configuration of the ACL. + */ +#define MLXSW_REG_PACL_ID 0x3004 +#define MLXSW_REG_PACL_LEN 0x70 + +MLXSW_REG_DEFINE(pacl, MLXSW_REG_PACL_ID, MLXSW_REG_PACL_LEN); + +/* reg_pacl_v + * Valid. Setting the v bit makes the ACL valid. 
It should not be cleared + * while the ACL is bounded to either a port, VLAN or ACL rule. + * Access: RW + */ +MLXSW_ITEM32(reg, pacl, v, 0x00, 24, 1); + +/* reg_pacl_acl_id + * An identifier representing the ACL (managed by software) + * Range 0 .. cap_max_acl_regions - 1 + * Access: Index + */ +MLXSW_ITEM32(reg, pacl, acl_id, 0x08, 0, 16); + +#define MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN 16 + +/* reg_pacl_tcam_region_info + * Opaque object that represents a TCAM region. + * Obtained through PTAR register. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, pacl, tcam_region_info, 0x30, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id, + bool valid, const char *tcam_region_info) +{ + MLXSW_REG_ZERO(pacl, payload); + mlxsw_reg_pacl_acl_id_set(payload, acl_id); + mlxsw_reg_pacl_v_set(payload, valid); + mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info); +} + +/* PAGT - Policy-Engine ACL Group Table + * ------------------------------------ + * This register is used for configuration of the ACL Group Table. + */ +#define MLXSW_REG_PAGT_ID 0x3005 +#define MLXSW_REG_PAGT_BASE_LEN 0x30 +#define MLXSW_REG_PAGT_ACL_LEN 4 +#define MLXSW_REG_PAGT_ACL_MAX_NUM 16 +#define MLXSW_REG_PAGT_LEN (MLXSW_REG_PAGT_BASE_LEN + \ + MLXSW_REG_PAGT_ACL_MAX_NUM * MLXSW_REG_PAGT_ACL_LEN) + +MLXSW_REG_DEFINE(pagt, MLXSW_REG_PAGT_ID, MLXSW_REG_PAGT_LEN); + +/* reg_pagt_size + * Number of ACLs in the group. + * Size 0 invalidates a group. + * Range 0 .. cap_max_acl_group_size (hard coded to 16 for now) + * Total number of ACLs in all groups must be lower or equal + * to cap_max_acl_tot_groups + * Note: a group which is binded must not be invalidated + * Access: Index + */ +MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8); + +/* reg_pagt_acl_group_id + * An identifier (numbered from 0..cap_max_acl_groups-1) representing + * the ACL Group identifier (managed by software). + * Access: Index + */ +MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16); + +/* reg_pagt_acl_id + * ACL identifier + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pagt, acl_id, 0x30, 0, 16, 0x04, 0x00, false); + +static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id) +{ + MLXSW_REG_ZERO(pagt, payload); + mlxsw_reg_pagt_acl_group_id_set(payload, acl_group_id); +} + +static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index, + u16 acl_id) +{ + u8 size = mlxsw_reg_pagt_size_get(payload); + + if (index >= size) + mlxsw_reg_pagt_size_set(payload, index + 1); + mlxsw_reg_pagt_acl_id_set(payload, index, acl_id); +} + +/* PTAR - Policy-Engine TCAM Allocation Register + * --------------------------------------------- + * This register is used for allocation of regions in the TCAM. + * Note: Query method is not supported on this register. 
+ */ +#define MLXSW_REG_PTAR_ID 0x3006 +#define MLXSW_REG_PTAR_BASE_LEN 0x20 +#define MLXSW_REG_PTAR_KEY_ID_LEN 1 +#define MLXSW_REG_PTAR_KEY_ID_MAX_NUM 16 +#define MLXSW_REG_PTAR_LEN (MLXSW_REG_PTAR_BASE_LEN + \ + MLXSW_REG_PTAR_KEY_ID_MAX_NUM * MLXSW_REG_PTAR_KEY_ID_LEN) + +MLXSW_REG_DEFINE(ptar, MLXSW_REG_PTAR_ID, MLXSW_REG_PTAR_LEN); + +enum mlxsw_reg_ptar_op { + /* allocate a TCAM region */ + MLXSW_REG_PTAR_OP_ALLOC, + /* resize a TCAM region */ + MLXSW_REG_PTAR_OP_RESIZE, + /* deallocate TCAM region */ + MLXSW_REG_PTAR_OP_FREE, + /* test allocation */ + MLXSW_REG_PTAR_OP_TEST, +}; + +/* reg_ptar_op + * Access: OP + */ +MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4); + +/* reg_ptar_action_set_type + * Type of action set to be used on this region. + * For Spectrum, this is always type 2 - "flexible" + * Access: WO + */ +MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8); + +/* reg_ptar_key_type + * TCAM key type for the region. + * For Spectrum, this is always type 0x50 - "FLEX_KEY" + * Access: WO + */ +MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8); + +/* reg_ptar_region_size + * TCAM region size. When allocating/resizing this is the requested size, + * the response is the actual size. Note that actual size may be + * larger than requested. + * Allowed range 1 .. cap_max_rules-1 + * Reserved during op deallocate. + * Access: WO + */ +MLXSW_ITEM32(reg, ptar, region_size, 0x04, 0, 16); + +/* reg_ptar_region_id + * Region identifier + * Range 0 .. cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, ptar, region_id, 0x08, 0, 16); + +/* reg_ptar_tcam_region_info + * Opaque object that represents the TCAM region. + * Returned when allocating a region. + * Provided by software for ACL generation and region deallocation and resize. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptar, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +/* reg_ptar_flexible_key_id + * Identifier of the Flexible Key. + * Only valid if key_type == "FLEX_KEY" + * The key size will be rounded up to one of the following values: + * 9B, 18B, 36B, 54B. + * This field is reserved for in resize operation. + * Access: WO + */ +MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8, + MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false); + +static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op, + u16 region_size, u16 region_id, + const char *tcam_region_info) +{ + MLXSW_REG_ZERO(ptar, payload); + mlxsw_reg_ptar_op_set(payload, op); + mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */ + mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */ + mlxsw_reg_ptar_region_size_set(payload, region_size); + mlxsw_reg_ptar_region_id_set(payload, region_id); + mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info); +} + +static inline void mlxsw_reg_ptar_key_id_pack(char *payload, int index, + u16 key_id) +{ + mlxsw_reg_ptar_flexible_key_id_set(payload, index, key_id); +} + +static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info) +{ + mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); +} + +/* PPBS - Policy-Engine Policy Based Switching Register + * ---------------------------------------------------- + * This register retrieves and sets Policy Based Switching Table entries. + */ +#define MLXSW_REG_PPBS_ID 0x300C +#define MLXSW_REG_PPBS_LEN 0x14 + +MLXSW_REG_DEFINE(ppbs, MLXSW_REG_PPBS_ID, MLXSW_REG_PPBS_LEN); + +/* reg_ppbs_pbs_ptr + * Index into the PBS table. + * For Spectrum, the index points to the KVD Linear. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, ppbs, pbs_ptr, 0x08, 0, 24); + +/* reg_ppbs_system_port + * Unique port identifier for the final destination of the packet. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbs, system_port, 0x10, 0, 16); + +static inline void mlxsw_reg_ppbs_pack(char *payload, u32 pbs_ptr, + u16 system_port) +{ + MLXSW_REG_ZERO(ppbs, payload); + mlxsw_reg_ppbs_pbs_ptr_set(payload, pbs_ptr); + mlxsw_reg_ppbs_system_port_set(payload, system_port); +} + +/* PRCR - Policy-Engine Rules Copy Register + * ---------------------------------------- + * This register is used for accessing rules within a TCAM region. + */ +#define MLXSW_REG_PRCR_ID 0x300D +#define MLXSW_REG_PRCR_LEN 0x40 + +MLXSW_REG_DEFINE(prcr, MLXSW_REG_PRCR_ID, MLXSW_REG_PRCR_LEN); + +enum mlxsw_reg_prcr_op { + /* Move rules. Moves the rules from "tcam_region_info" starting + * at offset "offset" to "dest_tcam_region_info" + * at offset "dest_offset." + */ + MLXSW_REG_PRCR_OP_MOVE, + /* Copy rules. Copies the rules from "tcam_region_info" starting + * at offset "offset" to "dest_tcam_region_info" + * at offset "dest_offset." + */ + MLXSW_REG_PRCR_OP_COPY, +}; + +/* reg_prcr_op + * Access: OP + */ +MLXSW_ITEM32(reg, prcr, op, 0x00, 28, 4); + +/* reg_prcr_offset + * Offset within the source region to copy/move from. + * Access: Index + */ +MLXSW_ITEM32(reg, prcr, offset, 0x00, 0, 16); + +/* reg_prcr_size + * The number of rules to copy/move. + * Access: WO + */ +MLXSW_ITEM32(reg, prcr, size, 0x04, 0, 16); + +/* reg_prcr_tcam_region_info + * Opaque object that represents the source TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, prcr, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +/* reg_prcr_dest_offset + * Offset within the source region to copy/move to. + * Access: Index + */ +MLXSW_ITEM32(reg, prcr, dest_offset, 0x20, 0, 16); + +/* reg_prcr_dest_tcam_region_info + * Opaque object that represents the destination TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, prcr, dest_tcam_region_info, 0x30, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op, + const char *src_tcam_region_info, + u16 src_offset, + const char *dest_tcam_region_info, + u16 dest_offset, u16 size) +{ + MLXSW_REG_ZERO(prcr, payload); + mlxsw_reg_prcr_op_set(payload, op); + mlxsw_reg_prcr_offset_set(payload, src_offset); + mlxsw_reg_prcr_size_set(payload, size); + mlxsw_reg_prcr_tcam_region_info_memcpy_to(payload, + src_tcam_region_info); + mlxsw_reg_prcr_dest_offset_set(payload, dest_offset); + mlxsw_reg_prcr_dest_tcam_region_info_memcpy_to(payload, + dest_tcam_region_info); +} + +/* PEFA - Policy-Engine Extended Flexible Action Register + * ------------------------------------------------------ + * This register is used for accessing an extended flexible action entry + * in the central KVD Linear Database. + */ +#define MLXSW_REG_PEFA_ID 0x300F +#define MLXSW_REG_PEFA_LEN 0xB0 + +MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); + +/* reg_pefa_index + * Index in the KVD Linear Centralized Database. + * Access: Index + */ +MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); + +#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8 + +/* reg_pefa_flex_action_set + * Action-set to perform when rule is matched. + * Must be zero padded if action set is shorter. 
+ * Access: RW + */ +MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, + MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); + +static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, + const char *flex_action_set) +{ + MLXSW_REG_ZERO(pefa, payload); + mlxsw_reg_pefa_index_set(payload, index); + mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set); +} + +/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 + * ----------------------------------------------------- + * This register is used for accessing rules within a TCAM region. + * It is a new version of PTCE in order to support wider key, + * mask and action within a TCAM region. This register is not supported + * by SwitchX and SwitchX-2. + */ +#define MLXSW_REG_PTCE2_ID 0x3017 +#define MLXSW_REG_PTCE2_LEN 0x1D8 + +MLXSW_REG_DEFINE(ptce2, MLXSW_REG_PTCE2_ID, MLXSW_REG_PTCE2_LEN); + +/* reg_ptce2_v + * Valid. + * Access: RW + */ +MLXSW_ITEM32(reg, ptce2, v, 0x00, 31, 1); + +/* reg_ptce2_a + * Activity. Set if a packet lookup has hit on the specific entry. + * To clear the "a" bit, use "clear activity" op or "clear on read" op. + * Access: RO + */ +MLXSW_ITEM32(reg, ptce2, a, 0x00, 30, 1); + +enum mlxsw_reg_ptce2_op { + /* Read operation. */ + MLXSW_REG_PTCE2_OP_QUERY_READ = 0, + /* clear on read operation. Used to read entry + * and clear Activity bit. + */ + MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ = 1, + /* Write operation. Used to write a new entry to the table. + * All R/W fields are relevant for new entry. Activity bit is set + * for new entries - Note write with v = 0 will delete the entry. + */ + MLXSW_REG_PTCE2_OP_WRITE_WRITE = 0, + /* Update action. Only action set will be updated. */ + MLXSW_REG_PTCE2_OP_WRITE_UPDATE = 1, + /* Clear activity. A bit is cleared for the entry. */ + MLXSW_REG_PTCE2_OP_WRITE_CLEAR_ACTIVITY = 2, +}; + +/* reg_ptce2_op + * Access: OP + */ +MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3); + +/* reg_ptce2_offset + * Access: Index + */ +MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16); + +/* reg_ptce2_tcam_region_info + * Opaque object that represents the TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96 + +/* reg_ptce2_flex_key_blocks + * ACL Key. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, + MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + +/* reg_ptce2_mask + * mask- in the same size as key. A bit that is set directs the TCAM + * to compare the corresponding bit in key. A bit that is clear directs + * the TCAM to ignore the corresponding bit in key. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, + MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + +/* reg_ptce2_flex_action_set + * ACL action set. 
+ * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, + MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); + +static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, + enum mlxsw_reg_ptce2_op op, + const char *tcam_region_info, + u16 offset) +{ + MLXSW_REG_ZERO(ptce2, payload); + mlxsw_reg_ptce2_v_set(payload, valid); + mlxsw_reg_ptce2_op_set(payload, op); + mlxsw_reg_ptce2_offset_set(payload, offset); + mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -3154,7 +3653,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) * Configures the properties for forwarding to CPU. */ #define MLXSW_REG_HTGT_ID 0x7002 -#define MLXSW_REG_HTGT_LEN 0x100 +#define MLXSW_REG_HTGT_LEN 0x20 MLXSW_REG_DEFINE(htgt, MLXSW_REG_HTGT_ID, MLXSW_REG_HTGT_LEN); @@ -4965,6 +5464,46 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, MLXSW_REG_MLCR_DURATION_MAX : 0); } +/* MPSC - Monitoring Packet Sampling Configuration Register + * -------------------------------------------------------- + * MPSC Register is used to configure the Packet Sampling mechanism. + */ +#define MLXSW_REG_MPSC_ID 0x9080 +#define MLXSW_REG_MPSC_LEN 0x1C + +MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN); + +/* reg_mpsc_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8); + +/* reg_mpsc_e + * Enable sampling on port local_port + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1); + +#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL + +/* reg_mpsc_rate + * Sampling rate = 1 out of rate packets (with randomization around + * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32); + +static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, + u32 rate) +{ + MLXSW_REG_ZERO(mpsc, payload); + mlxsw_reg_mpsc_local_port_set(payload, local_port); + mlxsw_reg_mpsc_e_set(payload, e); + mlxsw_reg_mpsc_rate_set(payload, rate); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. @@ -5394,6 +5933,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), + MLXSW_REG(ppbt), + MLXSW_REG(pacl), + MLXSW_REG(pagt), + MLXSW_REG(ptar), + MLXSW_REG(ppbs), + MLXSW_REG(prcr), + MLXSW_REG(pefa), + MLXSW_REG(ptce2), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), @@ -5429,6 +5976,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mpat), MLXSW_REG(mpar), MLXSW_REG(mlcr), + MLXSW_REG(mpsc), MLXSW_REG(sbpr), MLXSW_REG(sbcm), MLXSW_REG(sbpm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 3c2171dbdba4..bce8c2e00630 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/resources.h - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -48,6 +48,14 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_BUFFER_SIZE, + MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, + MLXSW_RES_ID_ACL_MAX_TCAM_RULES, + MLXSW_RES_ID_ACL_MAX_REGIONS, + MLXSW_RES_ID_ACL_MAX_GROUPS, + MLXSW_RES_ID_ACL_MAX_GROUP_SIZE, + MLXSW_RES_ID_ACL_FLEX_KEYS, + MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, + MLXSW_RES_ID_ACL_ACTIONS_PER_SET, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -72,6 +80,14 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ + [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, + [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, + [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, + [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904, + [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905, + [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, + [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, + [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 003093abb170..16484f24b7db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> * @@ -57,6 +57,7 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> #include <net/netevent.h> +#include <net/tc_act/tc_sample.h> #include "spectrum.h" #include "pci.h" @@ -137,8 +138,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); -static bool mlxsw_sp_port_dev_check(const struct net_device *dev); - static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -469,6 +468,16 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); } +static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable, u32 rate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mpsc_pl[MLXSW_REG_MPSC_LEN]; + + mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); +} + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up) { @@ -948,15 +957,13 @@ out: /* Return the stats from a cache that is updated periodically, * as this function might get called in an atomic context. 
*/ -static struct rtnl_link_stats64 * +static void mlxsw_sp_port_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats)); - - return stats; } int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, @@ -1164,8 +1171,8 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, } static struct mlxsw_sp_port_mall_tc_entry * -mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, - unsigned long cookie) { +mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, + unsigned long cookie) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) @@ -1177,17 +1184,15 @@ mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, static int mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_matchall_offload *cls, + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, const struct tc_action *a, bool ingress) { - struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; struct net *net = dev_net(mlxsw_sp_port->dev); enum mlxsw_sp_span_type span_type; struct mlxsw_sp_port *to_port; struct net_device *to_dev; int ifindex; - int err; ifindex = tcf_mirred_ifindex(a); to_dev = __dev_get_by_index(net, ifindex); @@ -1198,90 +1203,149 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, if (!mlxsw_sp_port_dev_check(to_dev)) { netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); - return -ENOTSUPP; + return -EOPNOTSUPP; } to_port = netdev_priv(to_dev); - mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); - if (!mall_tc_entry) - return -ENOMEM; + mirror->to_local_port = to_port->local_port; + mirror->ingress = ingress; + span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); +} - mall_tc_entry->cookie = cls->cookie; - mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; - mall_tc_entry->mirror.to_local_port = to_port->local_port; - mall_tc_entry->mirror.ingress = ingress; - list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); +static void +mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; - span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); + to_port = mlxsw_sp->ports[mirror->to_local_port]; + span_type = mirror->ingress ? 
+ MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); +} + +static int +mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls, + const struct tc_action *a, + bool ingress) +{ + int err; + + if (!mlxsw_sp_port->sample) + return -EOPNOTSUPP; + if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { + netdev_err(mlxsw_sp_port->dev, "sample already active\n"); + return -EEXIST; + } + if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) { + netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); + return -EOPNOTSUPP; + } + + rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, + tcf_sample_psample_group(a)); + mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a); + mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a); + mlxsw_sp_port->sample->rate = tcf_sample_rate(a); + + err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a)); if (err) - goto err_mirror_add; + goto err_port_sample_set; return 0; -err_mirror_add: - list_del(&mall_tc_entry->list); - kfree(mall_tc_entry); +err_port_sample_set: + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); return err; } +static void +mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) +{ + if (!mlxsw_sp_port->sample) + return; + + mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); +} + static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, __be16 protocol, struct tc_cls_matchall_offload *cls, bool ingress) { + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; const struct tc_action *a; LIST_HEAD(actions); int err; if (!tc_single_action(cls->exts)) { netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } - tcf_exts_to_list(cls->exts, &actions); - list_for_each_entry(a, &actions, list) { - if (!is_tcf_mirred_egress_mirror(a) || - protocol != htons(ETH_P_ALL)) { - return -ENOTSUPP; - } + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + mall_tc_entry->cookie = cls->cookie; - err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls, + tcf_exts_to_list(cls->exts, &actions); + a = list_first_entry(&actions, struct tc_action, list); + + if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; + + mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; + mirror = &mall_tc_entry->mirror; + err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, + mirror, a, ingress); + } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { + mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; + err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls, a, ingress); - if (err) - return err; + } else { + err = -EOPNOTSUPP; } + if (err) + goto err_add_action; + + list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); return 0; + +err_add_action: + kfree(mall_tc_entry); + return err; } static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_cls_matchall_offload *cls) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; - enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; - mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port, - cls->cookie); + mall_tc_entry = 
mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, + cls->cookie); if (!mall_tc_entry) { netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); return; } + list_del(&mall_tc_entry->list); switch (mall_tc_entry->type) { case MLXSW_SP_PORT_MALL_MIRROR: - to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port]; - span_type = mall_tc_entry->mirror.ingress ? - MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, + &mall_tc_entry->mirror); + break; + case MLXSW_SP_PORT_MALL_SAMPLE: + mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); break; default: WARN_ON(1); } - list_del(&mall_tc_entry->list); kfree(mall_tc_entry); } @@ -1291,7 +1355,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); - if (tc->type == TC_SETUP_MATCHALL) { + switch (tc->type) { + case TC_SETUP_MATCHALL: switch (tc->cls_mall->command) { case TC_CLSMATCHALL_REPLACE: return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, @@ -1305,9 +1370,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, default: return -EINVAL; } + case TC_SETUP_CLSFLOWER: + switch (tc->cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, + proto, tc->cls_flower); + case TC_CLSFLOWER_DESTROY: + mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, + tc->cls_flower); + return 0; + default: + return -EOPNOTSUPP; + } } - return -ENOTSUPP; + return -EOPNOTSUPP; } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { @@ -1323,8 +1400,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, - .ndo_neigh_construct = mlxsw_sp_router_neigh_construct, - .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy, .ndo_fdb_add = switchdev_port_fdb_add, .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, @@ -1650,7 +1725,7 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, break; default: WARN_ON(1); - return -ENOTSUPP; + return -EOPNOTSUPP; } return 0; } @@ -2256,6 +2331,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_alloc_stats; } + mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), + GFP_KERNEL); + if (!mlxsw_sp_port->sample) { + err = -ENOMEM; + goto err_alloc_sample; + } + mlxsw_sp_port->hw_stats.cache = kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); @@ -2384,6 +2466,8 @@ err_dev_addr_init: err_port_swid_set: kfree(mlxsw_sp_port->hw_stats.cache); err_alloc_hw_stats: + kfree(mlxsw_sp_port->sample); +err_alloc_sample: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: kfree(mlxsw_sp_port->untagged_vlans); @@ -2429,8 +2513,9 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); - free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->hw_stats.cache); + kfree(mlxsw_sp_port->sample); + free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); 
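Between the two spectrum.c hunks it is worth noting how the reworked matchall offload is driven: each offloaded rule is now stored as a typed mall_tc_entry (mirror or sample), the sample path programs the MPSC register and publishes the psample group pointer under RCU, and sampled packets are handed to the psample module by the RX listener added in the next hunk. As a hedged usage illustration (interface name, rate and group number are made up, and the exact command syntax is assumed from the act_sample/psample interface rather than stated in this patch), sampling on a port would be enabled from userspace roughly like:

	tc qdisc add dev sw1p1 handle ffff: ingress
	tc filter add dev sw1p1 parent ffff: matchall skip_sw \
		action sample rate 100 group 1 trunc 128

where the rate must not exceed MLXSW_REG_MPSC_RATE_MAX, the group selects the psample group userspace listens on, and the optional trunc size maps to the truncate/trunc_size fields kept in mlxsw_sp_port->sample.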
@@ -2731,6 +2816,41 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); } +static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct psample_group *psample_group; + u32 size; + + if (unlikely(!mlxsw_sp_port)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", + local_port); + goto out; + } + if (unlikely(!mlxsw_sp_port->sample)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", + local_port); + goto out; + } + + size = mlxsw_sp_port->sample->truncate ? + mlxsw_sp_port->sample->trunc_size : skb->len; + + rcu_read_lock(); + psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); + if (!psample_group) + goto out_unlock; + psample_sample_packet(psample_group, skb, size, + mlxsw_sp_port->dev->ifindex, 0, + mlxsw_sp_port->sample->rate); +out_unlock: + rcu_read_unlock(); +out: + consume_skb(skb); +} + #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) @@ -2766,6 +2886,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + /* PKT Sample trap */ + MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, + false, SP_IP2ME, DISCARD) }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) @@ -2950,10 +3073,16 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, else table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; - if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) + switch (type) { + case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST: flood_table = MLXSW_SP_FLOOD_TABLE_UC; - else - flood_table = MLXSW_SP_FLOOD_TABLE_BM; + break; + case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4: + flood_table = MLXSW_SP_FLOOD_TABLE_MC; + break; + default: + flood_table = MLXSW_SP_FLOOD_TABLE_BC; + } mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, flood_table); @@ -3089,6 +3218,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_span_init; } + err = mlxsw_sp_acl_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); + goto err_acl_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -3098,6 +3233,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + mlxsw_sp_acl_fini(mlxsw_sp); +err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: mlxsw_sp_router_fini(mlxsw_sp); @@ -3118,6 +3255,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -3138,9 +3276,9 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_flood_tables = 1, .used_flood_mode = 1, .flood_mode = 3, - .max_fid_offset_flood_tables = 2, + .max_fid_offset_flood_tables = 3, 
.fid_offset_flood_table_size = VLAN_N_VID - 1, - .max_fid_flood_tables = 2, + .max_fid_flood_tables = 3, .fid_flood_table_size = MLXSW_SP_VFID_MAX, .used_max_ib_mc = 1, .max_ib_mc = 0, @@ -3183,7 +3321,7 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; -static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +bool mlxsw_sp_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; } @@ -3341,6 +3479,8 @@ mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) if (!r) return NULL; + INIT_LIST_HEAD(&r->nexthop_list); + INIT_LIST_HEAD(&r->neigh_list); ether_addr_copy(r->addr, l3_dev->dev_addr); r->mtu = l3_dev->mtu; r->ref_count = 1; @@ -3409,6 +3549,8 @@ static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid = f->fid; u16 rif = r->rif; + mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); + mlxsw_sp->rifs[rif] = NULL; f->r = NULL; @@ -3553,7 +3695,7 @@ static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, table_type = mlxsw_sp_flood_table_type_get(fid); index = mlxsw_sp_flood_table_index_get(fid); - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type, + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type, 1, MLXSW_PORT_ROUTER_PORT, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); @@ -3638,6 +3780,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f = r->f; u16 rif = r->rif; + mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); + mlxsw_sp->rifs[rif] = NULL; f->r = NULL; @@ -3927,6 +4071,9 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->learning = 1; mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; + mlxsw_sp_port->mc_flood = 1; + mlxsw_sp_port->mc_router = 0; + mlxsw_sp_port->mc_disabled = 1; mlxsw_sp_port->bridged = 1; return 0; @@ -3943,6 +4090,8 @@ static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; + mlxsw_sp_port->mc_flood = 0; + mlxsw_sp_port->mc_router = 0; mlxsw_sp_port->bridged = 0; /* Add implicit VLAN interface in the device, so that untagged @@ -4605,6 +4754,9 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, mlxsw_sp_vport->learning = 1; mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; + mlxsw_sp_vport->mc_flood = 1; + mlxsw_sp_vport->mc_router = 0; + mlxsw_sp_vport->mc_disabled = 1; mlxsw_sp_vport->bridged = 1; return 0; @@ -4625,6 +4777,8 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) mlxsw_sp_vport->learning = 0; mlxsw_sp_vport->learning_sync = 0; mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->mc_flood = 0; + mlxsw_sp_vport->mc_router = 0; mlxsw_sp_vport->bridged = 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index cc1af19d699a..13ec85e7c392 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> * @@ -46,12 +46,16 @@ #include <linux/dcbnl.h> #include <linux/in6.h> #include <linux/notifier.h> +#include <net/psample.h> +#include <net/pkt_cls.h> #include "port.h" #include "core.h" +#include "core_acl_flex_keys.h" +#include "core_acl_flex_actions.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID -#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */ +#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */ #define MLXSW_SP_RFID_BASE 15360 #define MLXSW_SP_INVALID_RIF 0xffff @@ -104,6 +108,8 @@ struct mlxsw_sp_fid { }; struct mlxsw_sp_rif { + struct list_head nexthop_list; + struct list_head neigh_list; struct net_device *dev; unsigned int ref_count; struct mlxsw_sp_fid *f; @@ -229,6 +235,7 @@ struct mlxsw_sp_span_entry { enum mlxsw_sp_port_mall_action_type { MLXSW_SP_PORT_MALL_MIRROR, + MLXSW_SP_PORT_MALL_SAMPLE, }; struct mlxsw_sp_port_mall_mirror_tc_entry { @@ -249,17 +256,20 @@ struct mlxsw_sp_router { struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT]; struct mlxsw_sp_vr *vrs; struct rhashtable neigh_ht; + struct rhashtable nexthop_group_ht; + struct rhashtable nexthop_ht; struct { struct delayed_work dw; unsigned long interval; /* ms */ } neighs_update; struct delayed_work nexthop_probe_dw; #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */ - struct list_head nexthop_group_list; struct list_head nexthop_neighs_list; bool aborted; }; +struct mlxsw_sp_acl; + struct mlxsw_sp { struct { struct list_head list; @@ -289,6 +299,7 @@ struct mlxsw_sp { u8 port_to_module[MLXSW_PORT_MAX_PORTS]; struct mlxsw_sp_sb sb; struct mlxsw_sp_router router; + struct mlxsw_sp_acl *acl; struct { DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); } kvdl; @@ -315,15 +326,25 @@ struct mlxsw_sp_port_pcpu_stats { u32 tx_dropped; }; +struct mlxsw_sp_port_sample { + struct psample_group __rcu *psample_group; + u32 trunc_size; + u32 rate; + bool truncate; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sp *mlxsw_sp; u8 local_port; u8 stp_state; - u8 learning:1, + u16 learning:1, learning_sync:1, uc_flood:1, + mc_flood:1, + mc_router:1, + mc_disabled:1, bridged:1, lagged:1, split:1; @@ -361,8 +382,10 @@ struct mlxsw_sp_port { struct rtnl_link_stats64 *cache; struct delayed_work update_dw; } hw_stats; + struct mlxsw_sp_port_sample *sample; }; +bool mlxsw_sp_port_dev_check(const struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); @@ -489,7 +512,8 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, - MLXSW_SP_FLOOD_TABLE_BM, + MLXSW_SP_FLOOD_TABLE_BC, + MLXSW_SP_FLOOD_TABLE_MC, }; int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp); @@ -582,14 +606,107 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_router_neigh_construct(struct net_device *dev, - struct neighbour *n); -void mlxsw_sp_router_neigh_destroy(struct net_device *dev, - struct neighbour *n); int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr); +void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r); 
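The flower offload entry points wired into mlxsw_sp_setup_tc() earlier are built on the rule API declared just below (mlxsw_sp_acl_ruleset_get(), mlxsw_sp_acl_rule_create() and the mlxsw_sp_acl_rulei_* helpers) and implemented in spectrum_acl.c further down. A rough, hypothetical caller is sketched here to show the intended flow; the element name MLXSW_AFK_ELEMENT_IP_PROTO is assumed from the flex-key instances in spectrum_acl_flex_keys.h, and the real spectrum_flower.c derives keys and actions from the TC dissector instead of hard-coding them:

static int mlxsw_sp_acl_example_drop_tcp(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *dev,
					 unsigned long cookie)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Look up (or create and bind) the ingress ruleset for this port. */
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, true,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	/* Fill the rule info: priority, key/mask pairs, actions. */
	rulei = mlxsw_sp_acl_rule_rulei(rule);
	mlxsw_sp_acl_rulei_priority(rulei, 1);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_PROTO,
				       IPPROTO_TCP, 0xff);
	err = mlxsw_sp_acl_rulei_act_drop(rulei);
	if (err)
		goto err_rule_cfg;
	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rule_cfg;

	/* Insert into the TCAM through the profile ops. */
	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_cfg;

	/* The rule took its own ruleset reference in rule_create(), so the
	 * reference obtained by ruleset_get() can be dropped now.
	 */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_cfg:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}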
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); +struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); + +struct mlxsw_sp_acl_rule_info { + unsigned int priority; + struct mlxsw_afk_element_values values; + struct mlxsw_afa_block *act_block; +}; + +enum mlxsw_sp_acl_profile { + MLXSW_SP_ACL_PROFILE_FLOWER, +}; + +struct mlxsw_sp_acl_profile_ops { + size_t ruleset_priv_size; + int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, + void *priv, void *ruleset_priv); + void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct net_device *dev, bool ingress); + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + size_t rule_priv_size; + int (*rule_add)(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); +}; + +struct mlxsw_sp_acl_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + const struct mlxsw_sp_acl_profile_ops * + (*profile_ops)(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile); +}; + +struct mlxsw_sp_acl_ruleset; + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, bool ingress, + enum mlxsw_sp_acl_profile profile); +void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset); + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl); +void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei); +int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, + unsigned int priority); +void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value); +void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + const char *key_value, + const char *mask_value, unsigned int len); +void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id); +int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); +int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct net_device *out_dev); + +struct mlxsw_sp_acl_rule; + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie); +void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie); +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); + +int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); + +extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; + +int mlxsw_sp_flower_replace(struct mlxsw_sp_port 
*mlxsw_sp_port, bool ingress, + __be16 protocol, struct tc_cls_flower_offload *f); +void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c new file mode 100644 index 000000000000..8a18b3aa70dc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -0,0 +1,572 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/string.h> +#include <linux/rhashtable.h> +#include <linux/netdevice.h> + +#include "reg.h" +#include "core.h" +#include "resources.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" +#include "core_acl_flex_actions.h" +#include "spectrum_acl_flex_keys.h" + +struct mlxsw_sp_acl { + struct mlxsw_afk *afk; + struct mlxsw_afa *afa; + const struct mlxsw_sp_acl_ops *ops; + struct rhashtable ruleset_ht; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) +{ + return acl->afk; +} + +struct mlxsw_sp_acl_ruleset_ht_key { + struct net_device *dev; /* dev this ruleset is bound to */ + bool ingress; + const struct mlxsw_sp_acl_profile_ops *ops; +}; + +struct mlxsw_sp_acl_ruleset { + struct rhash_head ht_node; /* Member of acl HT */ + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + struct rhashtable rule_ht; + unsigned int ref_count; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_sp_acl_rule { + struct rhash_head ht_node; /* Member of rule HT */ + unsigned long cookie; /* HT key */ + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule_info *rulei; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key), + .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key), + .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node), + .automatic_shrinking = true, +}; + +static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie), + .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node), + .automatic_shrinking = true, +}; + +static struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_acl_profile_ops *ops) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset *ruleset; + size_t alloc_size; + int err; + + alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size; + ruleset = kzalloc(alloc_size, GFP_KERNEL); + if (!ruleset) + return ERR_PTR(-ENOMEM); + ruleset->ref_count = 1; + ruleset->ht_key.ops = ops; + + err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params); + if (err) + goto err_rhashtable_init; + + err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv); + if (err) + goto err_ops_ruleset_add; + + return ruleset; + +err_ops_ruleset_add: + rhashtable_destroy(&ruleset->rule_ht); +err_rhashtable_init: + kfree(ruleset); + return ERR_PTR(err); +} + +static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + ops->ruleset_del(mlxsw_sp, ruleset->priv); + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset); +} + +static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + struct net_device *dev, bool ingress) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + int err; + + ruleset->ht_key.dev = dev; + ruleset->ht_key.ingress = ingress; + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); + if (err) + return err; + 
err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); + if (err) + goto err_ops_ruleset_bind; + return 0; + +err_ops_ruleset_bind: + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); + return err; +} + +static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + + ops->ruleset_unbind(mlxsw_sp, ruleset->priv); + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); +} + +static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset) +{ + ruleset->ref_count++; +} + +static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + if (--ruleset->ref_count) + return; + mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset); + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); +} + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, bool ingress, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + struct mlxsw_sp_acl_ruleset *ruleset; + int err; + + ops = acl->ops->profile_ops(mlxsw_sp, profile); + if (!ops) + return ERR_PTR(-EINVAL); + + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.dev = dev; + ht_key.ingress = ingress; + ht_key.ops = ops; + ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + mlxsw_sp_acl_ruleset_ht_params); + if (ruleset) { + mlxsw_sp_acl_ruleset_ref_inc(ruleset); + return ruleset; + } + ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops); + if (IS_ERR(ruleset)) + return ruleset; + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress); + if (err) + goto err_ruleset_bind; + return ruleset; + +err_ruleset_bind: + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); +} + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) +{ + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); + if (!rulei) + return NULL; + rulei->act_block = mlxsw_afa_block_create(acl->afa); + if (IS_ERR(rulei->act_block)) { + err = PTR_ERR(rulei->act_block); + goto err_afa_block_create; + } + return rulei; + +err_afa_block_create: + kfree(rulei); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei) +{ + mlxsw_afa_block_destroy(rulei->act_block); + kfree(rulei); +} + +int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_commit(rulei->act_block); +} + +void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, + unsigned int priority) +{ + rulei->priority = priority; +} + +void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value) +{ + mlxsw_afk_values_add_u32(&rulei->values, element, + key_value, mask_value); +} + +void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + const char *key_value, + const char *mask_value, unsigned int len) +{ + mlxsw_afk_values_add_buf(&rulei->values, element, + key_value, mask_value, 
len); +} + +void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei) +{ + mlxsw_afa_block_continue(rulei->act_block); +} + +void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id) +{ + mlxsw_afa_block_jump(rulei->act_block, group_id); +} + +int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_append_drop(rulei->act_block); +} + +int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct net_device *out_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + u8 local_port; + bool in_port; + + if (out_dev) { + if (!mlxsw_sp_port_dev_check(out_dev)) + return -EINVAL; + mlxsw_sp_port = netdev_priv(out_dev); + if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) + return -EINVAL; + local_port = mlxsw_sp_port->local_port; + in_port = false; + } else { + /* If out_dev is NULL, the caller wants to + * forward to the ingress port. + */ + local_port = 0; + in_port = true; + } + return mlxsw_afa_block_append_fwd(rulei->act_block, + local_port, in_port); +} + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_rule *rule; + int err; + + mlxsw_sp_acl_ruleset_ref_inc(ruleset); + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); + if (!rule) { + err = -ENOMEM; + goto err_alloc; + } + rule->cookie = cookie; + rule->ruleset = ruleset; + + rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rule->rulei)) { + err = PTR_ERR(rule->rulei); + goto err_rulei_create; + } + return rule; + +err_rulei_create: + kfree(rule); +err_alloc: + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + + mlxsw_sp_acl_rulei_destroy(rule->rulei); + kfree(rule); + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); +} + +int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + int err; + + err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei); + if (err) + return err; + + err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, + mlxsw_sp_acl_rule_ht_params); + if (err) + goto err_rhashtable_insert; + + return 0; + +err_rhashtable_insert: + ops->rule_del(mlxsw_sp, rule->priv); + return err; +} + +void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + mlxsw_sp_acl_rule_ht_params); + ops->rule_del(mlxsw_sp, rule->priv); +} + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie) +{ + return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie, + mlxsw_sp_acl_rule_ht_params); +} + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule) +{ + return rule->rulei; +} + +#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 + +static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ +
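+ /* Action sets past the first do not fit in the TCAM entry itself;
+ * they live in the KVD linear area: allocate a slot there, program
+ * the encoded actions into it through the PEFA register and hand
+ * the KVD index back to the flex action core.
+ */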
struct mlxsw_sp *mlxsw_sp = priv; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + u32 kvdl_index; + int ret; + int err; + + /* The first action set of a TCAM entry is stored directly in TCAM, + * not KVD linear area. + */ + if (is_first) + return 0; + + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE); + if (ret < 0) + return ret; + kvdl_index = ret; + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_pefa_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, + bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + if (is_first) + return; + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, + u8 local_port) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char ppbs_pl[MLXSW_REG_PPBS_LEN]; + u32 kvdl_index; + int ret; + int err; + + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1); + if (ret < 0) + return ret; + kvdl_index = ret; + mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); + if (err) + goto err_ppbs_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_ppbs_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { + .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, +}; + +int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; + struct mlxsw_sp_acl *acl; + int err; + + acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL); + if (!acl) + return -ENOMEM; + mlxsw_sp->acl = acl; + + acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_FLEX_KEYS), + mlxsw_sp_afk_blocks, + MLXSW_SP_AFK_BLOCKS_COUNT); + if (!acl->afk) { + err = -ENOMEM; + goto err_afk_create; + } + + acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_ACTIONS_PER_SET), + &mlxsw_sp_act_afa_ops, mlxsw_sp); + if (IS_ERR(acl->afa)) { + err = PTR_ERR(acl->afa); + goto err_afa_create; + } + + err = rhashtable_init(&acl->ruleset_ht, + &mlxsw_sp_acl_ruleset_ht_params); + if (err) + goto err_rhashtable_init; + + err = acl_ops->init(mlxsw_sp, acl->priv); + if (err) + goto err_acl_ops_init; + + acl->ops = acl_ops; + return 0; + +err_acl_ops_init: + rhashtable_destroy(&acl->ruleset_ht); +err_rhashtable_init: + mlxsw_afa_destroy(acl->afa); +err_afa_create: + mlxsw_afk_destroy(acl->afk); +err_afk_create: + kfree(acl); + return err; +} + +void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; + + acl_ops->fini(mlxsw_sp, acl->priv); + rhashtable_destroy(&acl->ruleset_ht); + mlxsw_afa_destroy(acl->afa); + mlxsw_afk_destroy(acl->afk); + kfree(acl); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h new file mode 100644 index 000000000000..82b81cf7f4a7 --- /dev/null +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -0,0 +1,109 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H +#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H + +#include "core_acl_flex_keys.h" + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { + MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), +}; + +static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), + MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), + MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), + MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), + MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), + MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), + MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), + MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), + MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), +}; + +#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c new file mode 100644 index 000000000000..7382832215fa --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -0,0 +1,1084 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/bitops.h> +#include <linux/list.h> +#include <linux/rhashtable.h> +#include <linux/netdevice.h> +#include <linux/parman.h> + +#include "reg.h" +#include "core.h" +#include "resources.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_sp_acl_tcam { + unsigned long *used_regions; /* bit array */ + unsigned int max_regions; + unsigned long *used_groups; /* bit array */ + unsigned int max_groups; + unsigned int max_group_size; +}; + +static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp_acl_tcam *tcam = priv; + u64 max_tcam_regions; + u64 max_regions; + u64 max_groups; + size_t alloc_size; + int err; + + max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_TCAM_REGIONS); + max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); + + /* Use 1:1 mapping between ACL region and TCAM region */ + if (max_tcam_regions < max_regions) + max_regions = max_tcam_regions; + + alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions); + tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL); + if (!tcam->used_regions) + return -ENOMEM; + tcam->max_regions = max_regions; + + max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS); + alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups); + tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL); + if (!tcam->used_groups) { + err = -ENOMEM; + goto err_alloc_used_groups; + } + tcam->max_groups = max_groups; + tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_GROUP_SIZE); + return 0; + +err_alloc_used_groups: + kfree(tcam->used_regions); + return err; +} + +static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp 
*mlxsw_sp, void *priv) +{ + struct mlxsw_sp_acl_tcam *tcam = priv; + + kfree(tcam->used_groups); + kfree(tcam->used_regions); +} + +static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam, + u16 *p_id) +{ + u16 id; + + id = find_first_zero_bit(tcam->used_regions, tcam->max_regions); + if (id < tcam->max_regions) { + __set_bit(id, tcam->used_regions); + *p_id = id; + return 0; + } + return -ENOBUFS; +} + +static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam, + u16 id) +{ + __clear_bit(id, tcam->used_regions); +} + +static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam, + u16 *p_id) +{ + u16 id; + + id = find_first_zero_bit(tcam->used_groups, tcam->max_groups); + if (id < tcam->max_groups) { + __set_bit(id, tcam->used_groups); + *p_id = id; + return 0; + } + return -ENOBUFS; +} + +static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam, + u16 id) +{ + __clear_bit(id, tcam->used_groups); +} + +struct mlxsw_sp_acl_tcam_pattern { + const enum mlxsw_afk_element *elements; + unsigned int elements_count; +}; + +struct mlxsw_sp_acl_tcam_group { + struct mlxsw_sp_acl_tcam *tcam; + u16 id; + struct list_head region_list; + unsigned int region_count; + struct rhashtable chunk_ht; + struct { + u16 local_port; + bool ingress; + } bound; + struct mlxsw_sp_acl_tcam_group_ops *ops; + const struct mlxsw_sp_acl_tcam_pattern *patterns; + unsigned int patterns_count; +}; + +struct mlxsw_sp_acl_tcam_region { + struct list_head list; /* Member of a TCAM group */ + struct list_head chunk_list; /* List of chunks under this region */ + struct parman *parman; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_acl_tcam_group *group; + u16 id; /* ACL ID and region ID - they are same */ + char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; + struct mlxsw_afk_key_info *key_info; + struct { + struct parman_prio parman_prio; + struct parman_item parman_item; + struct mlxsw_sp_acl_rule_info *rulei; + } catchall; +}; + +struct mlxsw_sp_acl_tcam_chunk { + struct list_head list; /* Member of a TCAM region */ + struct rhash_head ht_node; /* Member of a chunk HT */ + unsigned int priority; /* Priority within the region and group */ + struct parman_prio parman_prio; + struct mlxsw_sp_acl_tcam_group *group; + struct mlxsw_sp_acl_tcam_region *region; + unsigned int ref_count; +}; + +struct mlxsw_sp_acl_tcam_entry { + struct parman_item parman_item; + struct mlxsw_sp_acl_tcam_chunk *chunk; +}; + +static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { + .key_len = sizeof(unsigned int), + .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority), + .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node), + .automatic_shrinking = true, +}; + +static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + struct mlxsw_sp_acl_tcam_region *region; + char pagt_pl[MLXSW_REG_PAGT_LEN]; + int acl_index = 0; + + mlxsw_reg_pagt_pack(pagt_pl, group->id); + list_for_each_entry(region, &group->region_list, list) + mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id); + mlxsw_reg_pagt_size_set(pagt_pl, acl_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl); +} + +static int +mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_sp_acl_tcam_group *group, + const struct mlxsw_sp_acl_tcam_pattern *patterns, + unsigned int patterns_count) +{ + int err; + + group->tcam = tcam; + group->patterns = 
patterns; + group->patterns_count = patterns_count; + INIT_LIST_HEAD(&group->region_list); + err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); + if (err) + return err; + + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + goto err_group_update; + + err = rhashtable_init(&group->chunk_ht, + &mlxsw_sp_acl_tcam_chunk_ht_params); + if (err) + goto err_rhashtable_init; + + return 0; + +err_rhashtable_init: +err_group_update: + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + return err; +} + +static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + struct mlxsw_sp_acl_tcam *tcam = group->tcam; + + rhashtable_destroy(&group->chunk_ht); + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + WARN_ON(!list_empty(&group->region_list)); +} + +static int +mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct net_device *dev, bool ingress) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + char ppbt_pl[MLXSW_REG_PPBT_LEN]; + + if (!mlxsw_sp_port_dev_check(dev)) + return -EINVAL; + + mlxsw_sp_port = netdev_priv(dev); + group->bound.local_port = mlxsw_sp_port->local_port; + group->bound.ingress = ingress; + mlxsw_reg_ppbt_pack(ppbt_pl, + group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_BIND, group->bound.local_port, + group->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); +} + +static void +mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + char ppbt_pl[MLXSW_REG_PPBT_LEN]; + + mlxsw_reg_ppbt_pack(ppbt_pl, + group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port, + group->id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); +} + +static unsigned int +mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + if (list_empty(&region->chunk_list)) + return 0; + /* As a priority of a region, return priority of the first chunk */ + chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list); + return chunk->priority; +} + +static unsigned int +mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + if (list_empty(&region->chunk_list)) + return 0; + chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list); + return chunk->priority; +} + +static void +mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_region *region2; + struct list_head *pos; + + /* Position the region inside the list according to priority */ + list_for_each(pos, &group->region_list) { + region2 = list_entry(pos, typeof(*region2), list); + if (mlxsw_sp_acl_tcam_region_prio(region2) > + mlxsw_sp_acl_tcam_region_prio(region)) + break; + } + list_add_tail(&region->list, pos); + group->region_count++; +} + +static void +mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + group->region_count--; + list_del(&region->list); +} + +static int +mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + int err; + + if (group->region_count == group->tcam->max_group_size) + return -ENOBUFS; + + mlxsw_sp_acl_tcam_group_list_add(group, region); + + err =
mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + goto err_group_update; + region->group = group; + + return 0; + +err_group_update: + mlxsw_sp_acl_tcam_group_list_del(group, region); + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + return err; +} + +static void +mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_group *group = region->group; + + mlxsw_sp_acl_tcam_group_list_del(group, region); + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); +} + +static struct mlxsw_sp_acl_tcam_region * +mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + bool *p_need_split) +{ + struct mlxsw_sp_acl_tcam_region *region, *region2; + struct list_head *pos; + bool issubset; + + list_for_each(pos, &group->region_list) { + region = list_entry(pos, typeof(*region), list); + + /* First, check whether the requested priority actually belongs + * under one of the following regions. + */ + if (pos->next != &group->region_list) { /* not last */ + region2 = list_entry(pos->next, typeof(*region2), list); + if (priority >= mlxsw_sp_acl_tcam_region_prio(region2)) + continue; + } + + issubset = mlxsw_afk_key_info_subset(region->key_info, elusage); + + /* If the requested element usage would not fit and the priority + * is lower than the currently inspected region, we cannot + * use this region, so return NULL to indicate a new region has + * to be created. + */ + if (!issubset && + priority < mlxsw_sp_acl_tcam_region_prio(region)) + return NULL; + + /* If the requested element usage would not fit and the priority + * is higher than the currently inspected region, this region + * cannot be used either, but one of the following regions may + * still fit. Keep iterating; the priority check above ends the + * walk at the right place. + */ + if (!issubset && + priority > mlxsw_sp_acl_tcam_region_max_prio(region)) + continue; + + /* Indicate if the region needs to be split in order to add + * the requested priority. A split is needed when the requested + * element usage won't fit into the found region. + */ + *p_need_split = !issubset; + return region; + } + return NULL; /* New region has to be created.
*/ +} + +static void +mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_afk_element_usage *out) +{ + const struct mlxsw_sp_acl_tcam_pattern *pattern; + int i; + + for (i = 0; i < group->patterns_count; i++) { + pattern = &group->patterns[i]; + mlxsw_afk_element_usage_fill(out, pattern->elements, + pattern->elements_count); + if (mlxsw_afk_element_usage_subset(elusage, out)) + return; + } + memcpy(out, elusage, sizeof(*out)); +} + +#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 + +static int +mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_afk_key_info *key_info = region->key_info; + char ptar_pl[MLXSW_REG_PTAR_LEN]; + unsigned int encodings_count; + int i; + int err; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC, + MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + region->id, region->tcam_region_info); + encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info); + for (i = 0; i < encodings_count; i++) { + u16 encoding; + + encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i); + mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding); + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); + if (err) + return err; + mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info); + return 0; +} + +static void +mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id, + region->tcam_region_info); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static int +mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 new_size) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, + new_size, region->id, region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static int +mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char pacl_pl[MLXSW_REG_PACL_LEN]; + + mlxsw_reg_pacl_pack(pacl_pl, region->id, true, + region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); +} + +static void +mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char pacl_pl[MLXSW_REG_PACL_LEN]; + + mlxsw_reg_pacl_pack(pacl_pl, region->id, false, + region->tcam_region_info); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); +} + +static int +mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset, + struct mlxsw_sp_acl_rule_info *rulei) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + char *act_set; + char *mask; + char *key; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); + mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); + mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); + + /* Only the first action set belongs here, the rest is in KVD */ + act_set = mlxsw_afa_block_first_set(rulei->act_block); + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static void 
+mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + + mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) + +static int +mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct parman_prio *parman_prio = &region->catchall.parman_prio; + struct parman_item *parman_item = &region->catchall.parman_item; + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + parman_prio_init(region->parman, parman_prio, + MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); + err = parman_item_add(region->parman, parman_prio, parman_item); + if (err) + goto err_parman_item_add; + + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rulei)) { + err = PTR_ERR(rulei); + goto err_rulei_create; + } + + mlxsw_sp_acl_rulei_act_continue(rulei); + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, + parman_item->index, rulei); + region->catchall.rulei = rulei; + if (err) + goto err_rule_insert; + + return 0; + +err_rule_insert: +err_rulei_commit: + mlxsw_sp_acl_rulei_destroy(rulei); +err_rulei_create: + parman_item_remove(region->parman, parman_prio, parman_item); +err_parman_item_add: + parman_prio_fini(parman_prio); + return err; +} + +static void +mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct parman_prio *parman_prio = &region->catchall.parman_prio; + struct parman_item *parman_item = &region->catchall.parman_item; + struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; + + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, + parman_item->index); + mlxsw_sp_acl_rulei_destroy(rulei); + parman_item_remove(region->parman, parman_prio, parman_item); + parman_prio_fini(parman_prio); +} + +static void +mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 src_offset, u16 dst_offset, u16 size) +{ + char prcr_pl[MLXSW_REG_PRCR_LEN]; + + mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, + region->tcam_region_info, src_offset, + region->tcam_region_info, dst_offset, size); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); +} + +static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp_acl_tcam_region *region = priv; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count); +} + +static void mlxsw_sp_acl_tcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp_acl_tcam_region *region = priv; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + + mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region, + from_index, to_index, count); +} + +static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = { + .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp_acl_tcam_region_parman_resize, + .move = mlxsw_sp_acl_tcam_region_parman_move, + .algo =
PARMAN_ALGO_TYPE_LSORT, +}; + +static struct mlxsw_sp_acl_tcam_region * +mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_tcam_region *region; + int err; + + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&region->chunk_list); + region->mlxsw_sp = mlxsw_sp; + + region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops, + region); + if (!region->parman) { + err = -ENOMEM; + goto err_parman_create; + } + + region->key_info = mlxsw_afk_key_info_get(afk, elusage); + if (IS_ERR(region->key_info)) { + err = PTR_ERR(region->key_info); + goto err_key_info_get; + } + + err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id); + if (err) + goto err_region_id_get; + + err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); + if (err) + goto err_tcam_region_alloc; + + err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region); + if (err) + goto err_tcam_region_enable; + + err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region); + if (err) + goto err_tcam_region_catchall_add; + + return region; + +err_tcam_region_catchall_add: + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); +err_tcam_region_enable: + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); +err_tcam_region_alloc: + mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); +err_region_id_get: + mlxsw_afk_key_info_put(region->key_info); +err_key_info_get: + parman_destroy(region->parman); +err_parman_create: + kfree(region); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); + mlxsw_afk_key_info_put(region->key_info); + parman_destroy(region->parman); + kfree(region); +} + +static int +mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_region *region; + bool region_created = false; + bool need_split; + int err; + + region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage, + &need_split); + if (region && need_split) { + /* According to priority, the chunk should belong to an + * existing region. However, this chunk needs elements + * that region does not contain. We need to split the existing + * region into two and create a new region for this chunk + * in between. This is not supported now.
+ */ + return -EOPNOTSUPP; + } + if (!region) { + struct mlxsw_afk_element_usage region_elusage; + + mlxsw_sp_acl_tcam_group_use_patterns(group, elusage, + ®ion_elusage); + region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam, + ®ion_elusage); + if (IS_ERR(region)) + return PTR_ERR(region); + region_created = true; + } + + chunk->region = region; + list_add_tail(&chunk->list, ®ion->chunk_list); + + if (!region_created) + return 0; + + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region); + if (err) + goto err_group_region_attach; + + return 0; + +err_group_region_attach: + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + return err; +} + +static void +mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + list_del(&chunk->list); + if (list_empty(®ion->chunk_list)) { + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + } +} + +static struct mlxsw_sp_acl_tcam_chunk * +mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + int err; + + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) + return ERR_PTR(-EINVAL); + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + return ERR_PTR(-ENOMEM); + chunk->priority = priority; + chunk->group = group; + chunk->ref_count = 1; + + err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority, + elusage, chunk); + if (err) + goto err_chunk_assoc; + + parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority); + + err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, + mlxsw_sp_acl_tcam_chunk_ht_params); + if (err) + goto err_rhashtable_insert; + + return chunk; + +err_rhashtable_insert: + parman_prio_fini(&chunk->parman_prio); + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); +err_chunk_assoc: + kfree(chunk); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_group *group = chunk->group; + + rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, + mlxsw_sp_acl_tcam_chunk_ht_params); + parman_prio_fini(&chunk->parman_prio); + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); + kfree(chunk); +} + +static struct mlxsw_sp_acl_tcam_chunk * +mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority, + mlxsw_sp_acl_tcam_chunk_ht_params); + if (chunk) { + if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info, + elusage))) + return ERR_PTR(-EINVAL); + chunk->ref_count++; + return chunk; + } + return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group, + priority, elusage); +} + +static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + if (--chunk->ref_count) + return; + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); +} + +static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_entry *entry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_region 
*region; + int err; + + chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority, + &rulei->values.elusage); + if (IS_ERR(chunk)) + return PTR_ERR(chunk); + + region = chunk->region; + err = parman_item_add(region->parman, &chunk->parman_prio, + &entry->parman_item); + if (err) + goto err_parman_item_add; + + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, + entry->parman_item.index, + rulei); + if (err) + goto err_rule_insert; + entry->chunk = chunk; + + return 0; + +err_rule_insert: + parman_item_remove(region->parman, &chunk->parman_prio, + &entry->parman_item); +err_parman_item_add: + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); + return err; +} + +static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_entry *entry) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, + entry->parman_item.index); + parman_item_remove(region->parman, &chunk->parman_prio, + &entry->parman_item); + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); +} + +static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { + MLXSW_AFK_ELEMENT_SRC_SYS_PORT, + MLXSW_AFK_ELEMENT_DMAC, + MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP4, + MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, +}; + +static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP6_HI, + MLXSW_AFK_ELEMENT_SRC_IP6_LO, + MLXSW_AFK_ELEMENT_DST_IP6_HI, + MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, +}; + +static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = { + { + .elements = mlxsw_sp_acl_tcam_pattern_ipv4, + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4), + }, + { + .elements = mlxsw_sp_acl_tcam_pattern_ipv6, + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6), + }, +}; + +#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \ + ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns) + +struct mlxsw_sp_acl_tcam_flower_ruleset { + struct mlxsw_sp_acl_tcam_group group; +}; + +struct mlxsw_sp_acl_tcam_flower_rule { + struct mlxsw_sp_acl_tcam_entry entry; +}; + +static int +mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, + void *priv, void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam *tcam = priv; + + return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, + mlxsw_sp_acl_tcam_patterns, + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT); +} + +static void +mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); +} + +static int +mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, + struct net_device *dev, bool ingress) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group, + dev, ingress); +} + +static void +mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group); +} + 
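
The TCAM code above delegates all slot bookkeeping to the priority array manager (parman) that this series relies on: each region owns a parman instance whose resize/move callbacks grow the hardware region and shuffle entries, each chunk contributes a parman_prio keyed by rule priority, and every entry is a parman_item whose ->index is the offset programmed into hardware. The following is only an illustrative sketch of that pattern, using the parman calls visible in this patch; the foo_* names, the counts, and the <linux/parman.h> include path are assumptions, not the driver's actual code.

#include <linux/err.h>
#include <linux/parman.h>

/* Hypothetical region wrapper; only what the sketch needs. */
struct foo_region {
	struct parman *parman;
	unsigned long hw_count;		/* slots currently backed by HW */
};

/* parman asks for more (or fewer) slots before placing items. */
static int foo_region_parman_resize(void *priv, unsigned long new_count)
{
	struct foo_region *region = priv;

	/* A real driver would resize the hardware region here. */
	region->hw_count = new_count;
	return 0;
}

/* parman keeps items sorted by priority and may relocate a block. */
static void foo_region_parman_move(void *priv, unsigned long from_index,
				   unsigned long to_index, unsigned long count)
{
	/* A real driver would issue the hardware move here (cf. PRCR above). */
}

static const struct parman_ops foo_region_parman_ops = {
	.base_count	= 16,	/* assumed initial region size */
	.resize_step	= 16,	/* assumed grow step */
	.resize		= foo_region_parman_resize,
	.move		= foo_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

static int foo_region_init(struct foo_region *region)
{
	region->parman = parman_create(&foo_region_parman_ops, region);
	return region->parman ? 0 : -ENOMEM;
}

/* Adding a rule: item->index is valid once parman_item_add() returns
 * and is the offset written to the TCAM (cf. PTCE2 above).
 */
static int foo_region_rule_add(struct foo_region *region,
			       struct parman_prio *prio,
			       struct parman_item *item)
{
	return parman_item_add(region->parman, prio, item);
}

Because the lsort algorithm keeps items ordered by ascending priority value, the region's catch-all entry (priority ~0U) always lands behind every chunk, so it is only hit when no real rule matches.
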
+static int +mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, + &rule->entry, rulei); +} + +static void +mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) +{ + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); +} + +static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { + .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset), + .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add, + .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del, + .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, + .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, + .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), + .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, + .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, +}; + +static const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops_arr[] = { + [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, +}; + +static const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + + if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr))) + return NULL; + ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile]; + if (WARN_ON(!ops)) + return NULL; + return ops; +} + +const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp_acl_tcam), + .init = mlxsw_sp_acl_tcam_init, + .fini = mlxsw_sp_acl_tcam_fini, + .profile_ops = mlxsw_sp_acl_tcam_profile_ops, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c new file mode 100644 index 000000000000..22ab42925377 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -0,0 +1,316 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <net/flow_dissector.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> + +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + struct mlxsw_sp_acl_rule_info *rulei, + struct tcf_exts *exts) +{ + const struct tc_action *a; + LIST_HEAD(actions); + int err; + + if (tc_no_actions(exts)) + return 0; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + if (is_tcf_gact_shot(a)) { + err = mlxsw_sp_acl_rulei_act_drop(rulei); + if (err) + return err; + } else if (is_tcf_mirred_egress_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + struct net_device *out_dev; + + out_dev = __dev_get_by_index(dev_net(dev), ifindex); + if (out_dev == dev) + out_dev = NULL; + + err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, + out_dev); + if (err) + return err; + } else { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); + return -EOPNOTSUPP; + } + } + return 0; +} + +static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4, + ntohl(key->src), ntohl(mask->src)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4, + ntohl(key->dst), ntohl(mask->dst)); +} + +static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + size_t addr_half_size = sizeof(key->src) / 2; + + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI, + &key->src.s6_addr[0], + &mask->src.s6_addr[0], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO, + &key->src.s6_addr[addr_half_size], + &mask->src.s6_addr[addr_half_size], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI, + &key->dst.s6_addr[0], + &mask->dst.s6_addr[0], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO, + &key->dst.s6_addr[addr_half_size], + &mask->dst.s6_addr[addr_half_size], + addr_half_size); +} + +static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f, + u8 ip_proto) +{ + struct flow_dissector_key_ports *key, *mask; + + if 
(!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) + return 0; + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n"); + return -EINVAL; + } + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT, + ntohs(key->dst), ntohs(mask->dst)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT, + ntohs(key->src), ntohs(mask->src)); + return 0; +} + +static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + u16 addr_type = 0; + u8 ip_proto = 0; + int err; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); + return -EOPNOTSUPP; + } + + mlxsw_sp_acl_rulei_priority(rulei, f->prio); + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->key); + addr_type = key->addr_type; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + u16 n_proto_key = ntohs(key->n_proto); + u16 n_proto_mask = ntohs(mask->n_proto); + + if (n_proto_key == ETH_P_ALL) { + n_proto_key = 0; + n_proto_mask = 0; + } + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_ETHERTYPE, + n_proto_key, n_proto_mask); + + ip_proto = key->ip_proto; + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_IP_PROTO, + key->ip_proto, mask->ip_proto); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_DMAC, + key->dst, mask->dst, + sizeof(key->dst)); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC, + key->src, mask->src, + sizeof(key->src)); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + mlxsw_sp_flower_parse_ipv4(rulei, f); + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + mlxsw_sp_flower_parse_ipv6(rulei, f); + + err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto); + if (err) + return err; + + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); +} + +int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + __be16 protocol, struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_acl_rule_info *rulei; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + int err; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, + 
MLXSW_SP_ACL_PROFILE_FLOWER); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_rule_create; + } + + rulei = mlxsw_sp_acl_rule_rulei(rule); + err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f); + if (err) + goto err_flower_parse; + + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + + err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule); + if (err) + goto err_rule_add; + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return 0; + +err_rule_add: +err_rulei_commit: +err_flower_parse: + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); +err_rule_create: + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return err; +} + +void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, + ingress, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (WARN_ON(IS_ERR(ruleset))) + return; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); + if (!WARN_ON(!rule)) { + mlxsw_sp_acl_rule_del(mlxsw_sp, rule); + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); + } + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9e494a446b7e..bd8de6b9be71 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -40,6 +40,7 @@ #include <linux/bitops.h> #include <linux/in6.h> #include <linux/notifier.h> +#include <linux/inetdevice.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> @@ -108,7 +109,6 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage, } struct mlxsw_sp_fib_key { - struct net_device *dev; unsigned char addr[sizeof(struct in6_addr)]; unsigned char prefix_len; }; @@ -121,95 +121,39 @@ enum mlxsw_sp_fib_entry_type { struct mlxsw_sp_nexthop_group; -struct mlxsw_sp_fib_entry { - struct rhash_head ht_node; +struct mlxsw_sp_fib_node { + struct list_head entry_list; struct list_head list; + struct rhash_head ht_node; + struct mlxsw_sp_vr *vr; struct mlxsw_sp_fib_key key; +}; + +struct mlxsw_sp_fib_entry_params { + u32 tb_id; + u32 prio; + u8 tos; + u8 type; +}; + +struct mlxsw_sp_fib_entry { + struct list_head list; + struct mlxsw_sp_fib_node *fib_node; enum mlxsw_sp_fib_entry_type type; - unsigned int ref_count; - u16 rif; /* used for action local */ - struct mlxsw_sp_vr *vr; - struct fib_info *fi; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; + struct mlxsw_sp_fib_entry_params params; + bool offloaded; }; struct mlxsw_sp_fib { struct rhashtable ht; - struct list_head entry_list; + struct list_head node_list; unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; struct mlxsw_sp_prefix_usage prefix_usage; }; -static const struct rhashtable_params mlxsw_sp_fib_ht_params = { - .key_offset = offsetof(struct mlxsw_sp_fib_entry, key), - .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node), - .key_len = sizeof(struct mlxsw_sp_fib_key), - .automatic_shrinking = true, -}; - -static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib, - struct mlxsw_sp_fib_entry *fib_entry) -{ - unsigned char prefix_len = fib_entry->key.prefix_len; - int err; - - err = 
rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node, - mlxsw_sp_fib_ht_params); - if (err) - return err; - list_add_tail(&fib_entry->list, &fib->entry_list); - if (fib->prefix_ref_count[prefix_len]++ == 0) - mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); - return 0; -} - -static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib, - struct mlxsw_sp_fib_entry *fib_entry) -{ - unsigned char prefix_len = fib_entry->key.prefix_len; - - if (--fib->prefix_ref_count[prefix_len] == 0) - mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); - list_del(&fib_entry->list); - rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node, - mlxsw_sp_fib_ht_params); -} - -static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr, - size_t addr_len, unsigned char prefix_len, - struct net_device *dev) -{ - struct mlxsw_sp_fib_entry *fib_entry; - - fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); - if (!fib_entry) - return NULL; - fib_entry->key.dev = dev; - memcpy(fib_entry->key.addr, addr, addr_len); - fib_entry->key.prefix_len = prefix_len; - return fib_entry; -} - -static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry) -{ - kfree(fib_entry); -} - -static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr, - size_t addr_len, unsigned char prefix_len, - struct net_device *dev) -{ - struct mlxsw_sp_fib_key key; - - memset(&key, 0, sizeof(key)); - key.dev = dev; - memcpy(key.addr, addr, addr_len); - key.prefix_len = prefix_len; - return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); -} +static const struct rhashtable_params mlxsw_sp_fib_ht_params; static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) { @@ -222,7 +166,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params); if (err) goto err_rhashtable_init; - INIT_LIST_HEAD(&fib->entry_list); + INIT_LIST_HEAD(&fib->node_list); return fib; err_rhashtable_init: @@ -232,6 +176,7 @@ err_rhashtable_init: static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) { + WARN_ON(!list_empty(&fib->node_list)); rhashtable_destroy(&fib->ht); kfree(fib); } @@ -496,30 +441,40 @@ static int mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, struct mlxsw_sp_prefix_usage *req_prefix_usage) { - struct mlxsw_sp_lpm_tree *lpm_tree; + struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; + struct mlxsw_sp_lpm_tree *new_tree; + int err; - if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, - &vr->lpm_tree->prefix_usage)) + if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage)) return 0; - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, + new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, vr->proto, false); - if (IS_ERR(lpm_tree)) { + if (IS_ERR(new_tree)) { /* We failed to get a tree according to the required * prefix usage. However, the current tree might be still good * for us if our requirement is subset of the prefixes used * in the tree. 
*/ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, - &vr->lpm_tree->prefix_usage)) + &lpm_tree->prefix_usage)) return 0; - return PTR_ERR(lpm_tree); + return PTR_ERR(new_tree); } - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); + /* Prevent packet loss by overwriting existing binding */ + vr->lpm_tree = new_tree; + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + if (err) + goto err_tree_bind; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); + + return 0; + +err_tree_bind: vr->lpm_tree = lpm_tree; - return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); + return err; } static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, @@ -610,12 +565,11 @@ struct mlxsw_sp_neigh_key { }; struct mlxsw_sp_neigh_entry { + struct list_head rif_list_node; struct rhash_head ht_node; struct mlxsw_sp_neigh_key key; u16 rif; - bool offloaded; - struct delayed_work dw; - struct mlxsw_sp_port *mlxsw_sp_port; + bool connected; unsigned char ha[ETH_ALEN]; struct list_head nexthop_list; /* list of nexthops using * this neigh entry @@ -629,105 +583,91 @@ static const struct rhashtable_params mlxsw_sp_neigh_ht_params = { .key_len = sizeof(struct mlxsw_sp_neigh_key), }; -static int -mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_neigh_entry *neigh_entry) -{ - return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht, - &neigh_entry->ht_node, - mlxsw_sp_neigh_ht_params); -} - -static void -mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_neigh_entry *neigh_entry) -{ - rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht, - &neigh_entry->ht_node, - mlxsw_sp_neigh_ht_params); -} - -static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); - static struct mlxsw_sp_neigh_entry * -mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) +mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n, + u16 rif) { struct mlxsw_sp_neigh_entry *neigh_entry; - neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); + neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL); if (!neigh_entry) return NULL; + neigh_entry->key.n = n; neigh_entry->rif = rif; - INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); INIT_LIST_HEAD(&neigh_entry->nexthop_list); + return neigh_entry; } -static void -mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) +static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry) { kfree(neigh_entry); } -static struct mlxsw_sp_neigh_entry * -mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) +static int +mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) { - struct mlxsw_sp_neigh_key key; + return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); +} - key.n = n; - return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, - &key, mlxsw_sp_neigh_ht_params); +static void +mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); } -int mlxsw_sp_router_neigh_construct(struct net_device *dev, - struct neighbour *n) +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) { - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_neigh_entry *neigh_entry; struct mlxsw_sp_rif *r; int err; - if (n->tbl != &arp_tbl) - return 0; - - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); - if (neigh_entry) - return 0; - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); - if (WARN_ON(!r)) - return -EINVAL; + if (!r) + return ERR_PTR(-EINVAL); - neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif); + neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif); if (!neigh_entry) - return -ENOMEM; + return ERR_PTR(-ENOMEM); + err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); if (err) goto err_neigh_entry_insert; - return 0; + + list_add(&neigh_entry->rif_list_node, &r->neigh_list); + + return neigh_entry; err_neigh_entry_insert: - mlxsw_sp_neigh_entry_destroy(neigh_entry); - return err; + mlxsw_sp_neigh_entry_free(neigh_entry); + return ERR_PTR(err); } -void mlxsw_sp_router_neigh_destroy(struct net_device *dev, - struct neighbour *n) +static void +mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) { - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_neigh_entry *neigh_entry; + list_del(&neigh_entry->rif_list_node); + mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); + mlxsw_sp_neigh_entry_free(neigh_entry); +} - if (n->tbl != &arp_tbl) - return; +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) +{ + struct mlxsw_sp_neigh_key key; - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); - if (!neigh_entry) - return; - mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); - mlxsw_sp_neigh_entry_destroy(neigh_entry); + key.n = n; + return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, + &key, mlxsw_sp_neigh_ht_params); } static void @@ -866,13 +806,11 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) /* Take RTNL mutex here to prevent lists from changes */ rtnl_lock(); list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, - nexthop_neighs_list_node) { + nexthop_neighs_list_node) /* If this neigh have nexthops, make the kernel think this neigh * is active regardless of the traffic. */ - if (!list_empty(&neigh_entry->nexthop_list)) - neigh_event_send(neigh_entry->key.n, NULL); - } + neigh_event_send(neigh_entry->key.n, NULL); rtnl_unlock(); } @@ -916,11 +854,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) */ rtnl_lock(); list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, - nexthop_neighs_list_node) { - if (!(neigh_entry->key.n->nud_state & NUD_VALID) && - !list_empty(&neigh_entry->nexthop_list)) + nexthop_neighs_list_node) + if (!neigh_entry->connected) neigh_event_send(neigh_entry->key.n, NULL); - } rtnl_unlock(); mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, @@ -932,79 +868,101 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, bool removing); -static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) +static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding) +{ + return adding ? 
MLXSW_REG_RAUHT_OP_WRITE_ADD : + MLXSW_REG_RAUHT_OP_WRITE_DELETE; +} + +static void +mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + enum mlxsw_reg_rauht_op op) { - struct mlxsw_sp_neigh_entry *neigh_entry = - container_of(work, struct mlxsw_sp_neigh_entry, dw.work); struct neighbour *n = neigh_entry->key.n; - struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u32 dip = ntohl(*((__be32 *) n->primary_key)); char rauht_pl[MLXSW_REG_RAUHT_LEN]; - struct net_device *dev; + + mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, + dip); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + +static void +mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + bool adding) +{ + if (!adding && !neigh_entry->connected) + return; + neigh_entry->connected = adding; + if (neigh_entry->key.n->tbl == &arp_tbl) + mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, + mlxsw_sp_rauht_op(adding)); + else + WARN_ON_ONCE(1); +} + +struct mlxsw_sp_neigh_event_work { + struct work_struct work; + struct mlxsw_sp *mlxsw_sp; + struct neighbour *n; +}; + +static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) +{ + struct mlxsw_sp_neigh_event_work *neigh_work = + container_of(work, struct mlxsw_sp_neigh_event_work, work); + struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp; + struct mlxsw_sp_neigh_entry *neigh_entry; + struct neighbour *n = neigh_work->n; + unsigned char ha[ETH_ALEN]; bool entry_connected; u8 nud_state, dead; - bool updating; - bool removing; - bool adding; - u32 dip; - int err; + /* If these parameters are changed after we release the lock, + * then we are guaranteed to receive another event letting us + * know about it. 
+ */ read_lock_bh(&n->lock); - dip = ntohl(*((__be32 *) n->primary_key)); - memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha)); + memcpy(ha, n->ha, ETH_ALEN); nud_state = n->nud_state; dead = n->dead; - dev = n->dev; read_unlock_bh(&n->lock); + rtnl_lock(); entry_connected = nud_state & NUD_VALID && !dead; - adding = (!neigh_entry->offloaded) && entry_connected; - updating = neigh_entry->offloaded && entry_connected; - removing = neigh_entry->offloaded && !entry_connected; - - if (adding || updating) { - mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD, - neigh_entry->rif, - neigh_entry->ha, dip); - err = mlxsw_reg_write(mlxsw_sp->core, - MLXSW_REG(rauht), rauht_pl); - if (err) { - netdev_err(dev, "Could not add neigh %pI4h\n", &dip); - neigh_entry->offloaded = false; - } else { - neigh_entry->offloaded = true; - } - mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false); - } else if (removing) { - mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE, - neigh_entry->rif, - neigh_entry->ha, dip); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), - rauht_pl); - if (err) { - netdev_err(dev, "Could not delete neigh %pI4h\n", &dip); - neigh_entry->offloaded = true; - } else { - neigh_entry->offloaded = false; - } - mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); + if (!entry_connected && !neigh_entry) + goto out; + if (!neigh_entry) { + neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); + if (IS_ERR(neigh_entry)) + goto out; } + memcpy(neigh_entry->ha, ha, ETH_ALEN); + mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); + mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected); + + if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) + mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); + +out: + rtnl_unlock(); neigh_release(n); - mlxsw_sp_port_dev_put(mlxsw_sp_port); + kfree(neigh_work); } int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct mlxsw_sp_neigh_entry *neigh_entry; + struct mlxsw_sp_neigh_event_work *neigh_work; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp; unsigned long interval; - struct net_device *dev; struct neigh_parms *p; struct neighbour *n; - u32 dip; switch (event) { case NETEVENT_DELAY_PROBE_TIME_UPDATE: @@ -1029,33 +987,31 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, break; case NETEVENT_NEIGH_UPDATE: n = ptr; - dev = n->dev; if (n->tbl != &arp_tbl) return NOTIFY_DONE; - mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev); + mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); if (!mlxsw_sp_port) return NOTIFY_DONE; - mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - dip = ntohl(*((__be32 *) n->primary_key)); - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); - if (WARN_ON(!neigh_entry)) { + neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC); + if (!neigh_work) { mlxsw_sp_port_dev_put(mlxsw_sp_port); - return NOTIFY_DONE; + return NOTIFY_BAD; } - neigh_entry->mlxsw_sp_port = mlxsw_sp_port; + + INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work); + neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + neigh_work->n = n; /* Take a reference to ensure the neighbour won't be * destructed until we drop the reference in delayed * work. 
*/ neigh_clone(n); - if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) { - neigh_release(n); - mlxsw_sp_port_dev_put(mlxsw_sp_port); - } + mlxsw_core_schedule_work(&neigh_work->work); + mlxsw_sp_port_dev_put(mlxsw_sp_port); break; } @@ -1093,11 +1049,40 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router.neigh_ht); } +static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_rif *r) +{ + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + + mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, + r->rif, r->addr); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + +static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; + + mlxsw_sp_neigh_rif_flush(mlxsw_sp, r); + list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list, + rif_list_node) + mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); +} + +struct mlxsw_sp_nexthop_key { + struct fib_nh *fib_nh; +}; + struct mlxsw_sp_nexthop { struct list_head neigh_list_node; /* member of neigh entry list */ + struct list_head rif_list_node; struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group * this belongs to */ + struct rhash_head ht_node; + struct mlxsw_sp_nexthop_key key; + struct mlxsw_sp_rif *r; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. */ @@ -1110,16 +1095,81 @@ struct mlxsw_sp_nexthop { struct mlxsw_sp_neigh_entry *neigh_entry; }; +struct mlxsw_sp_nexthop_group_key { + struct fib_info *fi; +}; + struct mlxsw_sp_nexthop_group { - struct list_head list; /* node in mlxsw->router.nexthop_group_list */ + struct rhash_head ht_node; struct list_head fib_list; /* list of fib entries that use this group */ - u8 adj_index_valid:1; + struct mlxsw_sp_nexthop_group_key key; + u8 adj_index_valid:1, + gateway:1; /* routes using the group use a gateway */ u32 adj_index; u16 ecmp_size; u16 count; struct mlxsw_sp_nexthop nexthops[0]; +#define nh_rif nexthops[0].r +}; + +static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key), + .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node), + .key_len = sizeof(struct mlxsw_sp_nexthop_group_key), }; +static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht, + &nh_grp->ht_node, + mlxsw_sp_nexthop_group_ht_params); +} + +static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht, + &nh_grp->ht_node, + mlxsw_sp_nexthop_group_ht_params); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group_key key) +{ + return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key, + mlxsw_sp_nexthop_group_ht_params); +} + +static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_nexthop, key), + .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node), + .key_len = sizeof(struct mlxsw_sp_nexthop_key), +}; + +static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht, + &nh->ht_node, 
mlxsw_sp_nexthop_ht_params); +} + +static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node, + mlxsw_sp_nexthop_ht_params); +} + +static struct mlxsw_sp_nexthop * +mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_key key) +{ + return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key, + mlxsw_sp_nexthop_ht_params); +} + static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, u32 adj_index, u16 ecmp_size, @@ -1144,9 +1194,9 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, int err; list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { - if (vr == fib_entry->vr) + if (vr == fib_entry->fib_node->vr) continue; - vr = fib_entry->vr; + vr = fib_entry->fib_node->vr; err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr, old_adj_index, old_ecmp_size, @@ -1234,6 +1284,11 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, int i; int err; + if (!nh_grp->gateway) { + mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); + return; + } + for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; @@ -1336,42 +1391,63 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop *nh; - /* Take RTNL mutex here to prevent lists from changes */ - rtnl_lock(); list_for_each_entry(nh, &neigh_entry->nexthop_list, neigh_list_node) { __mlxsw_sp_nexthop_neigh_update(nh, removing); mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); } - rtnl_unlock(); } -static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, - struct mlxsw_sp_nexthop *nh, - struct fib_nh *fib_nh) +static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh, + struct mlxsw_sp_rif *r) +{ + if (nh->r) + return; + + nh->r = r; + list_add(&nh->rif_list_node, &r->nexthop_list); +} + +static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh) +{ + if (!nh->r) + return; + + list_del(&nh->rif_list_node); + nh->r = NULL; +} + +static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry; - struct net_device *dev = fib_nh->nh_dev; + struct fib_nh *fib_nh = nh->key.fib_nh; struct neighbour *n; u8 nud_state, dead; + int err; + + if (!nh->nh_grp->gateway || nh->neigh_entry) + return 0; /* Take a reference of neigh here ensuring that neigh would * not be detructed before the nexthop entry is finished. * The reference is taken either in neigh_lookup() or - * in neith_create() in case n is not found. + * in neigh_create() in case n is not found. 
*/ - n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); + n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); if (!n) { - n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); + n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); if (IS_ERR(n)) return PTR_ERR(n); neigh_event_send(n, NULL); } neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (!neigh_entry) { - neigh_release(n); - return -EINVAL; + neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); + if (IS_ERR(neigh_entry)) { + err = -EINVAL; + goto err_neigh_entry_create; + } } /* If that is the first nexthop connected to that neigh, add to @@ -1381,7 +1457,6 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, list_add_tail(&neigh_entry->nexthop_neighs_list_node, &mlxsw_sp->router.nexthop_neighs_list); - nh->nh_grp = nh_grp; nh->neigh_entry = neigh_entry; list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list); read_lock_bh(&n->lock); @@ -1391,23 +1466,126 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead)); return 0; + +err_neigh_entry_create: + neigh_release(n); + return err; } -static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; + struct neighbour *n; + + if (!neigh_entry) + return; + n = neigh_entry->key.n; __mlxsw_sp_nexthop_neigh_update(nh, true); list_del(&nh->neigh_list_node); + nh->neigh_entry = NULL; /* If that is the last nexthop connected to that neigh, remove from * nexthop_neighs_list */ - if (list_empty(&nh->neigh_entry->nexthop_list)) - list_del(&nh->neigh_entry->nexthop_neighs_list_node); + if (list_empty(&neigh_entry->nexthop_list)) + list_del(&neigh_entry->nexthop_neighs_list_node); + + if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) + mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); + + neigh_release(n); +} + +static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + struct fib_nh *fib_nh) +{ + struct net_device *dev = fib_nh->nh_dev; + struct in_device *in_dev; + struct mlxsw_sp_rif *r; + int err; + + nh->nh_grp = nh_grp; + nh->key.fib_nh = fib_nh; + err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); + if (err) + return err; + + in_dev = __in_dev_get_rtnl(dev); + if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && + fib_nh->nh_flags & RTNH_F_LINKDOWN) + return 0; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!r) + return 0; + mlxsw_sp_nexthop_rif_init(nh, r); + + err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + if (err) + goto err_nexthop_neigh_init; + + return 0; + +err_nexthop_neigh_init: + mlxsw_sp_nexthop_remove(mlxsw_sp, nh); + return err; +} + +static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); + mlxsw_sp_nexthop_remove(mlxsw_sp, nh); +} + +static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, + unsigned long event, struct fib_nh *fib_nh) +{ + struct mlxsw_sp_nexthop_key key; + struct mlxsw_sp_nexthop *nh; + struct mlxsw_sp_rif *r; + + if (mlxsw_sp->router.aborted) + return; + + key.fib_nh = fib_nh; + nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key); + if (WARN_ON_ONCE(!nh)) + return; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev); + if (!r) + 
return; + + switch (event) { + case FIB_EVENT_NH_ADD: + mlxsw_sp_nexthop_rif_init(nh, r); + mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + break; + case FIB_EVENT_NH_DEL: + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); + break; + } + + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); +} + +static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + struct mlxsw_sp_nexthop *nh, *tmp; - neigh_release(neigh_entry->key.n); + list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) { + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + } } static struct mlxsw_sp_nexthop_group * @@ -1426,7 +1604,9 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) if (!nh_grp) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&nh_grp->fib_list); + nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; nh_grp->count = fi->fib_nhs; + nh_grp->key.fi = fi; for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; fib_nh = &fi->fib_nh[i]; @@ -1434,13 +1614,18 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) if (err) goto err_nexthop_init; } - list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list); + err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); + if (err) + goto err_nexthop_group_insert; mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); return nh_grp; +err_nexthop_group_insert: err_nexthop_init: - for (i--; i >= 0; i--) + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + } kfree(nh_grp); return ERR_PTR(err); } @@ -1452,7 +1637,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh; int i; - list_del(&nh_grp->list); + mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop_fini(mlxsw_sp, nh); @@ -1462,59 +1647,15 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, kfree(nh_grp); } -static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh, - struct fib_info *fi) -{ - int i; - - for (i = 0; i < fi->fib_nhs; i++) { - struct fib_nh *fib_nh = &fi->fib_nh[i]; - struct neighbour *n = nh->neigh_entry->key.n; - - if (memcmp(n->primary_key, &fib_nh->nh_gw, - sizeof(fib_nh->nh_gw)) == 0 && - n->dev == fib_nh->nh_dev) - return true; - } - return false; -} - -static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp, - struct fib_info *fi) -{ - int i; - - if (nh_grp->count != fi->fib_nhs) - return false; - for (i = 0; i < nh_grp->count; i++) { - struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; - - if (!mlxsw_sp_nexthop_match(nh, fi)) - return false; - } - return true; -} - -static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) -{ - struct mlxsw_sp_nexthop_group *nh_grp; - - list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list, - list) { - if (mlxsw_sp_nexthop_group_match(nh_grp, fi)) - return nh_grp; - } - return NULL; -} - static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, struct fib_info *fi) { + struct mlxsw_sp_nexthop_group_key key; struct mlxsw_sp_nexthop_group *nh_grp; - nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi); + key.fi = fi; + nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key); if (!nh_grp) { nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi); if 
(IS_ERR(nh_grp)) @@ -1536,13 +1677,82 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); } +static bool +mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; + + if (fib_entry->params.tos) + return false; + + switch (fib_entry->type) { + case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: + return !!nh_group->adj_index_valid; + case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: + return !!nh_group->nh_rif; + default: + return false; + } +} + +static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ + fib_entry->offloaded = true; + + switch (fib_entry->fib_node->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + fib_info_offload_inc(fib_entry->nh_group->key.fi); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON_ONCE(1); + } +} + +static void +mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + switch (fib_entry->fib_node->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + fib_info_offload_dec(fib_entry->nh_group->key.fi); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON_ONCE(1); + } + + fib_entry->offloaded = false; +} + +static void +mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op, int err) +{ + switch (op) { + case MLXSW_REG_RALUE_OP_WRITE_DELETE: + if (!fib_entry->offloaded) + return; + return mlxsw_sp_fib_entry_offload_unset(fib_entry); + case MLXSW_REG_RALUE_OP_WRITE_WRITE: + if (err) + return; + if (mlxsw_sp_fib_entry_should_offload(fib_entry) && + !fib_entry->offloaded) + mlxsw_sp_fib_entry_offload_set(fib_entry); + else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) && + fib_entry->offloaded) + mlxsw_sp_fib_entry_offload_unset(fib_entry); + return; + default: + return; + } +} + static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->vr; + u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; @@ -1552,7 +1762,7 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, * with provided ECMP size. Otherwise, setup trap and pass * traffic to kernel. 
*/ - if (fib_entry->nh_group->adj_index_valid) { + if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; adjacency_index = fib_entry->nh_group->adj_index; ecmp_size = fib_entry->nh_group->ecmp_size; @@ -1563,7 +1773,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ralue_pack4(ralue_pl, (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->key.prefix_len, *p_dip); + vr->id, fib_entry->fib_node->key.prefix_len, + *p_dip); mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, adjacency_index, ecmp_size); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); @@ -1573,16 +1784,27 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { + struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif; + enum mlxsw_reg_ralue_trap_action trap_action; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->vr; + u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; + u16 trap_id = 0; + u16 rif = 0; + + if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; + rif = r->rif; + } else { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; + trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; + } mlxsw_reg_ralue_pack4(ralue_pl, (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->key.prefix_len, *p_dip); - mlxsw_reg_ralue_act_local_pack(ralue_pl, - MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0, - fib_entry->rif); + vr->id, fib_entry->fib_node->key.prefix_len, + *p_dip); + mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } @@ -1591,12 +1813,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, enum mlxsw_reg_ralue_op op) { char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->vr; + u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; mlxsw_reg_ralue_pack4(ralue_pl, (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->key.prefix_len, *p_dip); + vr->id, fib_entry->fib_node->key.prefix_len, + *p_dip); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } @@ -1620,13 +1843,17 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { - switch (fib_entry->vr->proto) { + int err = -EINVAL; + + switch (fib_entry->fib_node->vr->proto) { case MLXSW_SP_L3_PROTO_IPV4: - return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); + err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); + break; case MLXSW_SP_L3_PROTO_IPV6: - return -EINVAL; + return err; } - return -EINVAL; + mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err); + return err; } static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, @@ -1644,14 +1871,11 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info, - struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info, + struct mlxsw_sp_fib_entry *fib_entry) { struct fib_info *fi = fen_info->fi; 
- struct mlxsw_sp_rif *r = NULL; - int nhsel; - int err; if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) { fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; @@ -1659,58 +1883,177 @@ mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, } if (fen_info->type != RTN_UNICAST) return -EINVAL; + if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + else + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + return 0; +} - for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { - const struct fib_nh *nh = &fi->fib_nh[nhsel]; +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node, + const struct fib_entry_notifier_info *fen_info) +{ + struct mlxsw_sp_fib_entry *fib_entry; + int err; - if (!nh->nh_dev) - continue; - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev); - if (!r) { - /* In case router interface is not found for - * at least one of the nexthops, that means - * the nexthop points to some device unrelated - * to us. Set trap and pass the packets for - * this prefix to kernel. - */ - break; - } + fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); + if (!fib_entry) { + err = -ENOMEM; + goto err_fib_entry_alloc; } - if (!r) { - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; - return 0; - } + err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); + if (err) + goto err_fib4_entry_type_set; - if (fi->fib_scope != RT_SCOPE_UNIVERSE) { - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; - fib_entry->rif = r->rif; - } else { - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; - err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi); - if (err) - return err; - } - fib_info_offload_inc(fen_info->fi); - return 0; + err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi); + if (err) + goto err_nexthop_group_get; + + fib_entry->params.prio = fen_info->fi->fib_priority; + fib_entry->params.tb_id = fen_info->tb_id; + fib_entry->params.type = fen_info->type; + fib_entry->params.tos = fen_info->tos; + + fib_entry->fib_node = fib_node; + + return fib_entry; + +err_nexthop_group_get: +err_fib4_entry_type_set: + kfree(fib_entry); +err_fib_entry_alloc: + return ERR_PTR(err); } -static void -mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { - if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP) - fib_info_offload_dec(fib_entry->fi); - if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE) - mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); + mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); + kfree(fib_entry); } +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info); + static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) +mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib_entry *fib_entry; - struct fib_info *fi = fen_info->fi; + struct mlxsw_sp_fib_node *fib_node; + + fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); + if (IS_ERR(fib_node)) + return NULL; + + list_for_each_entry(fib_entry, &fib_node->entry_list, list) { + if (fib_entry->params.tb_id == fen_info->tb_id && + fib_entry->params.tos == fen_info->tos && + fib_entry->params.type == fen_info->type && + 
fib_entry->nh_group->key.fi == fen_info->fi) { + return fib_entry; + } + } + + return NULL; +} + +static const struct rhashtable_params mlxsw_sp_fib_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_fib_node, key), + .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node), + .key_len = sizeof(struct mlxsw_sp_fib_key), + .automatic_shrinking = true, +}; + +static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_node *fib_node) +{ + return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node, + mlxsw_sp_fib_ht_params); +} + +static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_node *fib_node) +{ + rhashtable_remove_fast(&fib->ht, &fib_node->ht_node, + mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len) +{ + struct mlxsw_sp_fib_key key; + + memset(&key, 0, sizeof(key)); + memcpy(key.addr, addr, addr_len); + key.prefix_len = prefix_len; + return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr, + size_t addr_len, unsigned char prefix_len) +{ + struct mlxsw_sp_fib_node *fib_node; + + fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL); + if (!fib_node) + return NULL; + + INIT_LIST_HEAD(&fib_node->entry_list); + list_add(&fib_node->list, &vr->fib->node_list); + memcpy(fib_node->key.addr, addr, addr_len); + fib_node->key.prefix_len = prefix_len; + mlxsw_sp_fib_node_insert(vr->fib, fib_node); + fib_node->vr = vr; + + return fib_node; +} + +static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node) +{ + mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node); + list_del(&fib_node->list); + WARN_ON(!list_empty(&fib_node->entry_list)); + kfree(fib_node); +} + +static bool +mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, + const struct mlxsw_sp_fib_entry *fib_entry) +{ + return list_first_entry(&fib_node->entry_list, + struct mlxsw_sp_fib_entry, list) == fib_entry; +} + +static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) +{ + unsigned char prefix_len = fib_node->key.prefix_len; + struct mlxsw_sp_fib *fib = fib_node->vr->fib; + + if (fib->prefix_ref_count[prefix_len]++ == 0) + mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); +} + +static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node) +{ + unsigned char prefix_len = fib_node->key.prefix_len; + struct mlxsw_sp_fib *fib = fib_node->vr->fib; + + if (--fib->prefix_ref_count[prefix_len] == 0) + mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); +} + +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info) +{ + struct mlxsw_sp_fib_node *fib_node; struct mlxsw_sp_vr *vr; int err; @@ -1719,113 +2062,258 @@ mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(vr)) return ERR_CAST(vr); - fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len, fi->fib_dev); - if (fib_entry) { - /* Already exists, just take a reference */ - fib_entry->ref_count++; - return fib_entry; - } - fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len, fi->fib_dev); - if (!fib_entry) { + fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst, + sizeof(fen_info->dst), + 
fen_info->dst_len); + if (fib_node) + return fib_node; + + fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst, + sizeof(fen_info->dst), + fen_info->dst_len); + if (!fib_node) { err = -ENOMEM; - goto err_fib_entry_create; + goto err_fib_node_create; } - fib_entry->vr = vr; - fib_entry->fi = fi; - fib_entry->ref_count = 1; - err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry); - if (err) - goto err_fib4_entry_init; - - return fib_entry; + return fib_node; -err_fib4_entry_init: - mlxsw_sp_fib_entry_destroy(fib_entry); -err_fib_entry_create: +err_fib_node_create: mlxsw_sp_vr_put(mlxsw_sp, vr); - return ERR_PTR(err); } +static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_vr *vr = fib_node->vr; + + if (!list_empty(&fib_node->entry_list)) + return; + mlxsw_sp_fib_node_destroy(fib_node); + mlxsw_sp_vr_put(mlxsw_sp, vr); +} + static struct mlxsw_sp_fib_entry * -mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) +mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct mlxsw_sp_fib_entry_params *params) { - struct mlxsw_sp_vr *vr; + struct mlxsw_sp_fib_entry *fib_entry; - vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id, - MLXSW_SP_L3_PROTO_IPV4); - if (!vr) - return NULL; + list_for_each_entry(fib_entry, &fib_node->entry_list, list) { + if (fib_entry->params.tb_id > params->tb_id) + continue; + if (fib_entry->params.tb_id != params->tb_id) + break; + if (fib_entry->params.tos > params->tos) + continue; + if (fib_entry->params.prio >= params->prio || + fib_entry->params.tos < params->tos) + return fib_entry; + } - return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len, - fen_info->fi->fib_dev); + return NULL; } -static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib_entry *new_entry) { - struct mlxsw_sp_vr *vr = fib_entry->vr; + struct mlxsw_sp_fib_node *fib_node; + + if (WARN_ON(!fib_entry)) + return -EINVAL; - if (--fib_entry->ref_count == 0) { - mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); - mlxsw_sp_fib_entry_destroy(fib_entry); + fib_node = fib_entry->fib_node; + list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) { + if (fib_entry->params.tb_id != new_entry->params.tb_id || + fib_entry->params.tos != new_entry->params.tos || + fib_entry->params.prio != new_entry->params.prio) + break; } - mlxsw_sp_vr_put(mlxsw_sp, vr); + + list_add_tail(&new_entry->list, &fib_entry->list); + return 0; } -static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static int +mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, + struct mlxsw_sp_fib_entry *new_entry, + bool replace, bool append) { - unsigned int last_ref_count; + struct mlxsw_sp_fib_entry *fib_entry; - do { - last_ref_count = fib_entry->ref_count; - mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); - } while (last_ref_count != 1); + fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params); + + if (append) + return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry); + if (replace && WARN_ON(!fib_entry)) + return -EINVAL; + + /* Insert new entry before replaced one, so that we can later + * remove the second. 
+ */ + if (fib_entry) { + list_add_tail(&new_entry->list, &fib_entry->list); + } else { + struct mlxsw_sp_fib_entry *last; + + list_for_each_entry(last, &fib_node->entry_list, list) { + if (new_entry->params.tb_id > last->params.tb_id) + break; + fib_entry = last; + } + + if (fib_entry) + list_add(&new_entry->list, &fib_entry->list); + else + list_add(&new_entry->list, &fib_node->entry_list); + } + + return 0; +} + +static void +mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry) +{ + list_del(&fib_entry->list); +} + +static int +mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fib_node *fib_node, + struct mlxsw_sp_fib_entry *fib_entry) +{ + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) + return 0; + + /* To prevent packet loss, overwrite the previously offloaded + * entry. + */ + if (!list_is_singular(&fib_node->entry_list)) { + enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE; + struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); + + mlxsw_sp_fib_entry_offload_refresh(n, op, 0); + } + + return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); } -static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, - struct fib_entry_notifier_info *fen_info) +static void +mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fib_node *fib_node, + struct mlxsw_sp_fib_entry *fib_entry) +{ + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) + return; + + /* Promote the next entry by overwriting the deleted entry */ + if (!list_is_singular(&fib_node->entry_list)) { + struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); + enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE; + + mlxsw_sp_fib_entry_update(mlxsw_sp, n); + mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0); + return; + } + + mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); +} + +static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + bool replace, bool append) +{ + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + int err; + + err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace, + append); + if (err) + return err; + + err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry); + if (err) + goto err_fib4_node_entry_add; + + mlxsw_sp_fib_node_prefix_inc(fib_node); + + return 0; + +err_fib4_node_entry_add: + mlxsw_sp_fib4_node_list_remove(fib_entry); + return err; +} + +static void +mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + + mlxsw_sp_fib_node_prefix_dec(fib_node); + mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry); + mlxsw_sp_fib4_node_list_remove(fib_entry); +} + +static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + bool replace) +{ + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + struct mlxsw_sp_fib_entry *replaced; + + if (!replace) + return; + + /* We inserted the new entry before replaced one */ + replaced = list_next_entry(fib_entry, list); + + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced); + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); +} + +static int +mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, + const struct fib_entry_notifier_info *fen_info, + bool replace, bool append) { struct mlxsw_sp_fib_entry *fib_entry; - struct mlxsw_sp_vr *vr; + struct 
mlxsw_sp_fib_node *fib_node; int err; if (mlxsw_sp->router.aborted) return 0; - fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info); - if (IS_ERR(fib_entry)) { - dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n"); - return PTR_ERR(fib_entry); + fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); + if (IS_ERR(fib_node)) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n"); + return PTR_ERR(fib_node); } - if (fib_entry->ref_count != 1) - return 0; + fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); + if (IS_ERR(fib_entry)) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); + err = PTR_ERR(fib_entry); + goto err_fib4_entry_create; + } - vr = fib_entry->vr; - err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry); + err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace, + append); if (err) { - dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n"); - goto err_fib_entry_insert; + dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); + goto err_fib4_node_entry_link; } - err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); - if (err) - goto err_fib_entry_add; + + mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace); + return 0; -err_fib_entry_add: - mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); -err_fib_entry_insert: - mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); +err_fib4_node_entry_link: + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); +err_fib4_entry_create: + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); return err; } @@ -1833,20 +2321,19 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib_node *fib_node; if (mlxsw_sp->router.aborted) return; - fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info); - if (!fib_entry) + fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); + if (WARN_ON(!fib_entry)) return; + fib_node = fib_entry->fib_node; - if (fib_entry->ref_count == 1) { - mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); - mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry); - } - - mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); } static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) @@ -1880,10 +2367,42 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } +static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_fib_entry *fib_entry, *tmp; + + list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) { + bool do_break = &tmp->list == &fib_node->entry_list; + + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + /* Break when entry list is empty and node was freed. + * Otherwise, we'll access freed memory in the next + * iteration. 
+ */ + if (do_break) + break; + } +} + +static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + switch (fib_node->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON_ONCE(1); + break; + } +} + static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_sp_fib_entry *fib_entry; - struct mlxsw_sp_fib_entry *tmp; + struct mlxsw_sp_fib_node *fib_node, *tmp; struct mlxsw_sp_vr *vr; int i; @@ -1893,14 +2412,11 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!vr->used) continue; - list_for_each_entry_safe(fib_entry, tmp, - &vr->fib->entry_list, list) { - bool do_break = &tmp->list == &vr->fib->entry_list; + list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list, + list) { + bool do_break = &tmp->list == &vr->fib->node_list; - mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); - mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, - fib_entry); - mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry); + mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node); if (do_break) break; } @@ -1921,6 +2437,28 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n"); } +static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + int err; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (WARN_ON_ONCE(err)) + return err; + + mlxsw_reg_ritr_enable_set(ritr_pl, false); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif); + mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r); + mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r); +} + static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { char rgcr_pl[MLXSW_REG_RGCR_LEN]; @@ -1964,8 +2502,11 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) } struct mlxsw_sp_fib_event_work { - struct delayed_work dw; - struct fib_entry_notifier_info fen_info; + struct work_struct work; + union { + struct fib_entry_notifier_info fen_info; + struct fib_nh_notifier_info fnh_info; + }; struct mlxsw_sp *mlxsw_sp; unsigned long event; }; @@ -1973,15 +2514,21 @@ struct mlxsw_sp_fib_event_work { static void mlxsw_sp_router_fib_event_work(struct work_struct *work) { struct mlxsw_sp_fib_event_work *fib_work = - container_of(work, struct mlxsw_sp_fib_event_work, dw.work); + container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; + bool replace, append; int err; /* Protect internal structures from changes */ rtnl_lock(); switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: - err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info); + replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; + append = fib_work->event == FIB_EVENT_ENTRY_APPEND; + err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, + replace, append); if (err) mlxsw_sp_router_fib4_abort(mlxsw_sp); fib_info_put(fib_work->fen_info.fi); @@ -1994,6 +2541,12 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) case FIB_EVENT_RULE_DEL: mlxsw_sp_router_fib4_abort(mlxsw_sp); break; + case FIB_EVENT_NH_ADD: /* fall through */ + case 
FIB_EVENT_NH_DEL: + mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event, + fib_work->fnh_info.fib_nh); + fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); + break; } rtnl_unlock(); kfree(fib_work); @@ -2014,11 +2567,13 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, if (WARN_ON(!fib_work)) return NOTIFY_BAD; - INIT_DELAYED_WORK(&fib_work->dw, mlxsw_sp_router_fib_event_work); + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work); fib_work->mlxsw_sp = mlxsw_sp; fib_work->event = event; switch (event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); @@ -2027,9 +2582,14 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, */ fib_info_hold(fib_work->fen_info.fi); break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info)); + fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + break; } - mlxsw_core_schedule_odw(&fib_work->dw, 0); + mlxsw_core_schedule_work(&fib_work->work); return NOTIFY_DONE; } @@ -2051,11 +2611,20 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) int err; INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list); - INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) return err; + err = rhashtable_init(&mlxsw_sp->router.nexthop_ht, + &mlxsw_sp_nexthop_ht_params); + if (err) + goto err_nexthop_ht_init; + + err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht, + &mlxsw_sp_nexthop_group_ht_params); + if (err) + goto err_nexthop_group_ht_init; + mlxsw_sp_lpm_init(mlxsw_sp); err = mlxsw_sp_vrs_init(mlxsw_sp); if (err) @@ -2078,6 +2647,10 @@ err_register_fib_notifier: err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); err_vrs_init: + rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); +err_nexthop_group_ht_init: + rhashtable_destroy(&mlxsw_sp->router.nexthop_ht); +err_nexthop_ht_init: __mlxsw_sp_router_fini(mlxsw_sp); return err; } @@ -2087,5 +2660,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_fib_notifier(&mlxsw_sp->fib_nb); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); + rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); + rhashtable_destroy(&mlxsw_sp->router.nexthop_ht); __mlxsw_sp_router_fini(mlxsw_sp); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index b87ba7d36bc4..598727d578c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -71,8 +71,21 @@ mlxsw_sp_port_orig_get(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *fid; u16 vid; + if (netif_is_bridge_master(dev)) { + fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp, + dev); + if (fid) { + mlxsw_sp_vport = + mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, + fid->fid); + WARN_ON(!mlxsw_sp_vport); + return mlxsw_sp_vport; + } + } + if (!is_vlan_dev(dev)) return mlxsw_sp_port; @@ -166,9 +179,10 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); } -static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 idx_begin, u16 idx_end, bool uc_set, - bool bm_set) +static int 
__mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 idx_begin, u16 idx_end, + enum mlxsw_sp_flood_table table, + bool set) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u16 local_port = mlxsw_sp_port->local_port; @@ -186,31 +200,48 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, uc_set); + mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin, + table_type, range, local_port, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + + kfree(sftr_pl); + return err; +} + +static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 idx_begin, u16 idx_end, bool uc_set, + bool bc_set, bool mc_set) +{ + int err; + + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_UC, uc_set); if (err) - goto buffer_out; + return err; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, - table_type, range, local_port, bm_set); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_BC, bc_set); if (err) goto err_flood_bm_set; - goto buffer_out; + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_MC, mc_set); + if (err) + goto err_flood_mc_set; + return 0; +err_flood_mc_set: + __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_BC, !bc_set); err_flood_bm_set: - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, !uc_set); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); -buffer_out: - kfree(sftr_pl); + __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, + MLXSW_SP_FLOOD_TABLE_UC, !uc_set); return err; } -static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - bool set) +static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_sp_flood_table table, + bool set) { struct net_device *dev = mlxsw_sp_port->dev; u16 vid, last_visited_vid; @@ -220,13 +251,13 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid; u16 vfid = mlxsw_sp_fid_to_vfid(fid); - return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid, - set, true); + return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid, + vfid, table, set); } for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set, - true); + err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, + table, set); if (err) { last_visited_vid = vid; goto err_port_flood_set; @@ -237,21 +268,53 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, err_port_flood_set: for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) - __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true); + __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table, + !set); netdev_err(dev, "Failed to configure unicast flooding\n"); return err; } +static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + bool mc_disabled) +{ + int set; + int err = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) { + set = 
mc_disabled ? + mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; + err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_MC, + set); + } + + if (!err) + mlxsw_sp_port->mc_disabled = mc_disabled; + + return err; +} + int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, bool set) { + bool mc_set = set; u16 vfid; /* In case of vFIDs, index into the flooding table is relative to * the start of the vFIDs range. */ vfid = mlxsw_sp_fid_to_vfid(fid); - return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set); + + if (set) + mc_set = mlxsw_sp_vport->mc_disabled ? + mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router; + + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set, + mc_set); } static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -297,8 +360,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; if ((uc_flood ^ brport_flags) & BR_FLOOD) { - err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, - !mlxsw_sp_port->uc_flood); + err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_UC, + !mlxsw_sp_port->uc_flood); if (err) return err; } @@ -318,8 +382,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, err_port_learning_set: if ((uc_flood ^ brport_flags) & BR_FLOOD) - mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, - mlxsw_sp_port->uc_flood); + mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_UC, + mlxsw_sp_port->uc_flood); return err; } @@ -371,6 +436,22 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + bool is_port_mc_router) +{ + if (switchdev_trans_ph_prepare(trans)) + return 0; + + mlxsw_sp_port->mc_router = is_port_mc_router; + if (!mlxsw_sp_port->mc_disabled) + return mlxsw_sp_port_flood_table_set(mlxsw_sp_port, + MLXSW_SP_FLOOD_TABLE_MC, + is_port_mc_router); + + return 0; +} + static int mlxsw_sp_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -400,6 +481,14 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->orig_dev, attr->u.vlan_filtering); break; + case SWITCHDEV_ATTR_ID_PORT_MROUTER: + err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans, + attr->u.mrouter); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, + attr->u.mc_disabled); + break; default: err = -EOPNOTSUPP; break; @@ -545,6 +634,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid, static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid_begin, u16 fid_end) { + bool mc_flood; int fid, err; for (fid = fid_begin; fid <= fid_end; fid++) { @@ -553,8 +643,12 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, goto err_port_fid_join; } + mc_flood = mlxsw_sp_port->mc_disabled ? 
+ mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, - mlxsw_sp_port->uc_flood, true); + mlxsw_sp_port->uc_flood, true, + mc_flood); if (err) goto err_port_flood_set; @@ -570,7 +664,7 @@ err_port_fid_map: for (fid--; fid >= fid_begin; fid--) mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, - false); + false, false); err_port_flood_set: fid = fid_end; err_port_fid_join: @@ -588,7 +682,7 @@ static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, - false); + false, false); for (fid = fid_begin; fid <= fid_end; fid++) __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 2e88115e8735..ec1e886d4566 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -382,7 +382,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu) return 0; } -static struct rtnl_link_stats64 * +static void mlxsw_sx_port_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -411,7 +411,6 @@ mlxsw_sx_port_get_stats64(struct net_device *dev, tx_dropped += p->tx_dropped; } stats->tx_dropped = tx_dropped; - return stats; } static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, @@ -734,7 +733,7 @@ static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto) } static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, - struct ethtool_cmd *cmd) + struct ethtool_link_ksettings *cmd) { u32 speed = SPEED_UNKNOWN; u8 duplex = DUPLEX_UNKNOWN; @@ -751,8 +750,8 @@ static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, } } out: - ethtool_cmd_speed_set(cmd, speed); - cmd->duplex = duplex; + cmd->base.speed = speed; + cmd->base.duplex = duplex; } static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto) @@ -777,8 +776,9 @@ static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto) return PORT_OTHER; } -static int mlxsw_sx_port_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +mlxsw_sx_port_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; @@ -786,6 +786,7 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev, u32 eth_proto_cap; u32 eth_proto_admin; u32 eth_proto_oper; + u32 supported, advertising, lp_advertising; int err; mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); @@ -797,18 +798,24 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev, mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, ð_proto_oper); - cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) | + supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) | mlxsw_sx_from_ptys_supported_link(eth_proto_cap) | SUPPORTED_Pause | SUPPORTED_Asym_Pause; - cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin); + advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin); mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, cmd); eth_proto_oper = eth_proto_oper ? 
eth_proto_oper : eth_proto_cap; - cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper); - cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper); + cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper); + lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); - cmd->transceiver = XCVR_INTERNAL; return 0; } @@ -848,8 +855,9 @@ static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed) return ptys_proto; } -static int mlxsw_sx_port_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +mlxsw_sx_port_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; @@ -858,13 +866,17 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev, u32 eth_proto_new; u32 eth_proto_cap; u32 eth_proto_admin; + u32 advertising; bool is_up; int err; - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); - eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? - mlxsw_sx_to_ptys_advert_link(cmd->advertising) : + eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ? + mlxsw_sx_to_ptys_advert_link(advertising) : mlxsw_sx_to_ptys_speed(speed); mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); @@ -921,8 +933,8 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = { .get_strings = mlxsw_sx_port_get_strings, .get_ethtool_stats = mlxsw_sx_port_get_stats, .get_sset_count = mlxsw_sx_port_get_sset_count, - .get_settings = mlxsw_sx_port_get_settings, - .set_settings = mlxsw_sx_port_set_settings, + .get_link_ksettings = mlxsw_sx_port_get_link_ksettings, + .set_link_ksettings = mlxsw_sx_port_set_link_ksettings, }; static int mlxsw_sx_port_attr_get(struct net_device *dev, diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 7ab275deacac..02ea48b15eb5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -54,6 +54,7 @@ enum { MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index 20cb85bc0c5f..bd51e057e915 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) /* Relinquish the SKB to the network layer */ skb_put(skb, pktlen); skb->protocol = eth_type_trans(skb, ndev); - netif_receive_skb(skb); + napi_gro_receive(&ksp->napi, skb); /* Record stats */ ndev->stats.rx_packets++; @@ -561,18 +561,17 @@ rx_finished: static int ks8695_poll(struct napi_struct *napi, int budget) { struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi); - unsigned long work_done; - unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN); unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); + int work_done; work_done = ks8695_rx(ksp, budget); - if (work_done < 
budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; + spin_lock_irqsave(&ksp->rx_lock, flags); - __napi_complete(napi); - /*enable rx interrupt*/ + /* enable rx interrupt */ writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN); spin_unlock_irqrestore(&ksp->rx_lock, flags); } @@ -855,85 +854,94 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value) } /** - * ks8695_wan_get_settings - Get device-specific settings. + * ks8695_wan_get_link_ksettings - Get device-specific settings. * @ndev: The network device to read settings from * @cmd: The ethtool structure to read into */ static int -ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; + u32 supported, advertising; /* All ports on the KS8695 support these... */ - cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP | SUPPORTED_MII); - cmd->transceiver = XCVR_INTERNAL; - cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; - cmd->port = PORT_MII; - cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); - cmd->phy_address = 0; + advertising = ADVERTISED_TP | ADVERTISED_MII; + cmd->base.port = PORT_MII; + supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); + cmd->base.phy_address = 0; ctrl = readl(ksp->phyiface_regs + KS8695_WMC); if ((ctrl & WMC_WAND) == 0) { /* auto-negotiation is enabled */ - cmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; if (ctrl & WMC_WANA100F) - cmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; if (ctrl & WMC_WANA100H) - cmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if (ctrl & WMC_WANA10F) - cmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (ctrl & WMC_WANA10H) - cmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if (ctrl & WMC_WANAP) - cmd->advertising |= ADVERTISED_Pause; - cmd->autoneg = AUTONEG_ENABLE; + advertising |= ADVERTISED_Pause; + cmd->base.autoneg = AUTONEG_ENABLE; - ethtool_cmd_speed_set(cmd, - (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10); - cmd->duplex = (ctrl & WMC_WDS) ? + cmd->base.speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; + cmd->base.duplex = (ctrl & WMC_WDS) ? DUPLEX_FULL : DUPLEX_HALF; } else { /* auto-negotiation is disabled */ - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; - ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ? - SPEED_100 : SPEED_10)); - cmd->duplex = (ctrl & WMC_WANFF) ? + cmd->base.speed = (ctrl & WMC_WANF100) ? + SPEED_100 : SPEED_10; + cmd->base.duplex = (ctrl & WMC_WANFF) ? DUPLEX_FULL : DUPLEX_HALF; } + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } /** - * ks8695_wan_set_settings - Set device-specific settings. + * ks8695_wan_set_link_ksettings - Set device-specific settings. 
* @ndev: The network device to configure * @cmd: The settings to configure */ static int -ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; + u32 advertising; - if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100)) - return -EINVAL; - if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL)) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if ((cmd->base.speed != SPEED_10) && (cmd->base.speed != SPEED_100)) return -EINVAL; - if (cmd->port != PORT_MII) + if ((cmd->base.duplex != DUPLEX_HALF) && + (cmd->base.duplex != DUPLEX_FULL)) return -EINVAL; - if (cmd->transceiver != XCVR_INTERNAL) + if (cmd->base.port != PORT_MII) return -EINVAL; - if ((cmd->autoneg != AUTONEG_DISABLE) && - (cmd->autoneg != AUTONEG_ENABLE)) + if ((cmd->base.autoneg != AUTONEG_DISABLE) && + (cmd->base.autoneg != AUTONEG_ENABLE)) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE) { - if ((cmd->advertising & (ADVERTISED_10baseT_Half | + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full)) == 0) @@ -943,13 +951,13 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | WMC_WANA10F | WMC_WANA10H); - if (cmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) ctrl |= WMC_WANA100F; - if (cmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) ctrl |= WMC_WANA100H; - if (cmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) ctrl |= WMC_WANA10F; - if (cmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) ctrl |= WMC_WANA10H; /* force a re-negotiation */ @@ -962,9 +970,9 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ctrl |= WMC_WAND; ctrl &= ~(WMC_WANF100 | WMC_WANFF); - if (cmd->speed == SPEED_100) + if (cmd->base.speed == SPEED_100) ctrl |= WMC_WANF100; - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) ctrl |= WMC_WANFF; writel(ctrl, ksp->phyiface_regs + KS8695_WMC); @@ -1043,12 +1051,12 @@ static const struct ethtool_ops ks8695_ethtool_ops = { static const struct ethtool_ops ks8695_wan_ethtool_ops = { .get_msglevel = ks8695_get_msglevel, .set_msglevel = ks8695_set_msglevel, - .get_settings = ks8695_wan_get_settings, - .set_settings = ks8695_wan_set_settings, .nway_reset = ks8695_wan_nwayreset, .get_link = ethtool_op_get_link, .get_pauseparam = ks8695_wan_get_pause, .get_drvinfo = ks8695_get_drvinfo, + .get_link_ksettings = ks8695_wan_get_link_ksettings, + .set_link_ksettings = ks8695_wan_set_link_ksettings, }; /* Network device interface functions */ diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index e7e1aff40bd9..279ee4612981 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -84,7 +84,6 @@ union ks8851_tx_hdr { * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. - * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. 
* @vdd_reg: Optional regulator supplying the chip * @vdd_io: Optional digital power supply for IO @@ -120,7 +119,6 @@ struct ks8851_net { u16 rc_ier; u16 rc_rxqcr; u16 rc_ccr; - u16 eeprom_size; struct mii_if_info mii; struct ks8851_rxctrl rxctrl; @@ -1088,16 +1086,18 @@ static void ks8851_set_msglevel(struct net_device *dev, u32 to) ks->msg_enable = to; } -static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ks8851_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct ks8851_net *ks = netdev_priv(dev); - return mii_ethtool_gset(&ks->mii, cmd); + return mii_ethtool_get_link_ksettings(&ks->mii, cmd); } -static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ks8851_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct ks8851_net *ks = netdev_priv(dev); - return mii_ethtool_sset(&ks->mii, cmd); + return mii_ethtool_set_link_ksettings(&ks->mii, cmd); } static u32 ks8851_get_link(struct net_device *dev) @@ -1253,13 +1253,13 @@ static const struct ethtool_ops ks8851_ethtool_ops = { .get_drvinfo = ks8851_get_drvinfo, .get_msglevel = ks8851_get_msglevel, .set_msglevel = ks8851_set_msglevel, - .get_settings = ks8851_get_settings, - .set_settings = ks8851_set_settings, .get_link = ks8851_get_link, .nway_reset = ks8851_nway_reset, .get_eeprom_len = ks8851_get_eeprom_len, .get_eeprom = ks8851_get_eeprom, .set_eeprom = ks8851_set_eeprom, + .get_link_ksettings = ks8851_get_link_ksettings, + .set_link_ksettings = ks8851_set_link_ksettings, }; /* MII interface controls */ @@ -1533,11 +1533,6 @@ static int ks8851_probe(struct spi_device *spi) /* cache the contents of the CCR register for EEPROM, etc. */ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR); - if (ks->rc_ccr & CCR_EEPROM) - ks->eeprom_size = 128; - else - ks->eeprom_size = 0; - ks8851_read_selftest(ks); ks8851_init_mac(ks); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index db628078a4e6..7647f7bdbcb8 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1311,16 +1311,18 @@ static void ks_set_msglevel(struct net_device *netdev, u32 to) ks->msg_enable = to; } -static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ks_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ks_net *ks = netdev_priv(netdev); - return mii_ethtool_gset(&ks->mii, cmd); + return mii_ethtool_get_link_ksettings(&ks->mii, cmd); } -static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ks_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ks_net *ks = netdev_priv(netdev); - return mii_ethtool_sset(&ks->mii, cmd); + return mii_ethtool_set_link_ksettings(&ks->mii, cmd); } static u32 ks_get_link(struct net_device *netdev) @@ -1339,10 +1341,10 @@ static const struct ethtool_ops ks_ethtool_ops = { .get_drvinfo = ks_get_drvinfo, .get_msglevel = ks_get_msglevel, .set_msglevel = ks_set_msglevel, - .get_settings = ks_get_settings, - .set_settings = ks_set_settings, .get_link = ks_get_link, .nway_reset = ks_nway_reset, + .get_link_ksettings = ks_get_link_ksettings, + .set_link_ksettings = ks_set_link_ksettings, }; /* MII interface controls */ diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 97f6ef1fa7d0..ee1c78abab0b 100644 --- 
a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -1251,10 +1251,10 @@ struct ksz_port_info { * @tx_size: Transmit data size. Used for TX optimization. * The maximum is defined by MAX_TX_HELD_SIZE. * @perm_addr: Permanent MAC address. - * @override_addr: Overrided MAC address. + * @override_addr: Overridden MAC address. * @address: Additional MAC address entries. * @addr_list_size: Additional MAC address list size. - * @mac_override: Indication of MAC address overrided. + * @mac_override: Indication of MAC address overridden. * @promiscuous: Counter to keep track of promiscuous mode set. * @all_multi: Counter to keep track of all multicast mode set. * @multi_list: Multicast address entries. @@ -4042,7 +4042,7 @@ static int empty_addr(u8 *addr) * @hw: The hardware instance. * * This routine programs the MAC address of the hardware when the address is - * overrided. + * overridden. */ static void hw_set_addr(struct ksz_hw *hw) { @@ -5944,7 +5944,7 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 }; /* These functions use the MII functions in mii.c. */ /** - * netdev_get_settings - get network device settings + * netdev_get_link_ksettings - get network device settings * @dev: Network device. * @cmd: Ethtool command. * @@ -5952,23 +5952,26 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 }; * * Return 0 if successful; otherwise an error code. */ -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; mutex_lock(&hw_priv->lock); - mii_ethtool_gset(&priv->mii_if, cmd); - cmd->advertising |= SUPPORTED_TP; + mii_ethtool_get_link_ksettings(&priv->mii_if, cmd); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); mutex_unlock(&hw_priv->lock); /* Save advertised settings for workaround in next function. */ - priv->advertising = cmd->advertising; + ethtool_convert_link_mode_to_legacy_u32(&priv->advertising, + cmd->link_modes.advertising); + return 0; } /** - * netdev_set_settings - set network device settings + * netdev_set_link_ksettings - set network device settings * @dev: Network device. * @cmd: Ethtool command. * @@ -5976,54 +5979,65 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) * * Return 0 if successful; otherwise an error code. */ -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; struct ksz_port *port = &priv->port; - u32 speed = ethtool_cmd_speed(cmd); + struct ethtool_link_ksettings copy_cmd; + u32 speed = cmd->base.speed; + u32 advertising; int rc; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* * ethtool utility does not change advertised setting if auto * negotiation is not specified explicitly. 
*/ - if (cmd->autoneg && priv->advertising == cmd->advertising) { - cmd->advertising |= ADVERTISED_ALL; + if (cmd->base.autoneg && priv->advertising == advertising) { + advertising |= ADVERTISED_ALL; if (10 == speed) - cmd->advertising &= + advertising &= ~(ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half); else if (100 == speed) - cmd->advertising &= + advertising &= ~(ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half); - if (0 == cmd->duplex) - cmd->advertising &= + if (0 == cmd->base.duplex) + advertising &= ~(ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Full); - else if (1 == cmd->duplex) - cmd->advertising &= + else if (1 == cmd->base.duplex) + advertising &= ~(ADVERTISED_100baseT_Half | ADVERTISED_10baseT_Half); } mutex_lock(&hw_priv->lock); - if (cmd->autoneg && - (cmd->advertising & ADVERTISED_ALL) == - ADVERTISED_ALL) { + if (cmd->base.autoneg && + (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) { port->duplex = 0; port->speed = 0; port->force_link = 0; } else { - port->duplex = cmd->duplex + 1; + port->duplex = cmd->base.duplex + 1; if (1000 != speed) port->speed = speed; - if (cmd->autoneg) + if (cmd->base.autoneg) port->force_link = 0; else port->force_link = 1; } - rc = mii_ethtool_sset(&priv->mii_if, cmd); + + memcpy(©_cmd, cmd, sizeof(copy_cmd)); + ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising, + advertising); + rc = mii_ethtool_set_link_ksettings( + &priv->mii_if, + (const struct ethtool_link_ksettings *)©_cmd); mutex_unlock(&hw_priv->lock); return rc; } @@ -6597,8 +6611,6 @@ static int netdev_set_features(struct net_device *dev, } static const struct ethtool_ops netdev_ethtool_ops = { - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_drvinfo = netdev_get_drvinfo, @@ -6617,6 +6629,8 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_strings = netdev_get_strings, .get_sset_count = netdev_get_sset_count, .get_ethtool_stats = netdev_get_ethtool_stats, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; /* @@ -7029,7 +7043,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) if (macaddr[0] != ':') get_mac_addr(hw_priv, macaddr, MAIN_PORT); - /* Read MAC address and initialize override address if not overrided. */ + /* Read MAC address and initialize override address if not overridden. */ hw_read_addr(hw); /* Multiple device interfaces mode requires a second MAC address. */ diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 045b9106c0ff..f6ecfa778660 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -1487,27 +1487,30 @@ enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) } static int -enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +enc28j60_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct enc28j60_net *priv = netdev_priv(dev); - cmd->transceiver = XCVR_INTERNAL; - cmd->supported = SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_TP; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = priv->full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; - cmd->port = PORT_TP; - cmd->autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + + cmd->base.speed = SPEED_10; + cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_TP; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } static int -enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +enc28j60_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - return enc28j60_setlink(dev, cmd->autoneg, - ethtool_cmd_speed(cmd), cmd->duplex); + return enc28j60_setlink(dev, cmd->base.autoneg, + cmd->base.speed, cmd->base.duplex); } static u32 enc28j60_get_msglevel(struct net_device *dev) @@ -1523,11 +1526,11 @@ static void enc28j60_set_msglevel(struct net_device *dev, u32 val) } static const struct ethtool_ops enc28j60_ethtool_ops = { - .get_settings = enc28j60_get_settings, - .set_settings = enc28j60_set_settings, .get_drvinfo = enc28j60_get_drvinfo, .get_msglevel = enc28j60_get_msglevel, .set_msglevel = enc28j60_set_msglevel, + .get_link_ksettings = enc28j60_get_link_ksettings, + .set_link_ksettings = enc28j60_set_link_ksettings, }; static int enc28j60_chipset_init(struct net_device *dev) diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index fbce6166504e..f831238d9793 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -940,29 +940,33 @@ static void encx24j600_get_drvinfo(struct net_device *dev, sizeof(info->bus_info)); } -static int encx24j600_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int encx24j600_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct encx24j600_priv *priv = netdev_priv(dev); + u32 supported; - cmd->transceiver = XCVR_INTERNAL; - cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP; - ethtool_cmd_speed_set(cmd, priv->speed); - cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; - cmd->port = PORT_TP; - cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + + cmd->base.speed = priv->speed; + cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_TP; + cmd->base.autoneg = priv->autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } -static int encx24j600_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +encx24j600_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - return encx24j600_setlink(dev, cmd->autoneg, - ethtool_cmd_speed(cmd), cmd->duplex); + return encx24j600_setlink(dev, cmd->base.autoneg, + cmd->base.speed, cmd->base.duplex); } static u32 encx24j600_get_msglevel(struct net_device *dev) @@ -980,13 +984,13 @@ static void encx24j600_set_msglevel(struct net_device *dev, u32 val) } static const struct ethtool_ops encx24j600_ethtool_ops = { - .get_settings = encx24j600_get_settings, - .set_settings = encx24j600_set_settings, .get_drvinfo = encx24j600_get_drvinfo, .get_msglevel = encx24j600_get_msglevel, .set_msglevel = encx24j600_set_msglevel, .get_regs_len = encx24j600_get_regs_len, .get_regs = encx24j600_get_regs, + .get_link_ksettings = encx24j600_get_link_ksettings, + .set_link_ksettings = encx24j600_set_link_ksettings, }; static const struct net_device_ops encx24j600_netdev_ops = { diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 9774b50cff6e..06c9f4100cb9 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -269,7 +269,7 @@ rx_next: } if (rx < budget) { - napi_complete(napi); + napi_complete_done(napi, rx); } priv->reg_imr |= RPKT_FINISH_M; @@ -436,7 +436,7 @@ static void moxart_mac_set_rx_mode(struct net_device *ndev) spin_unlock_irq(&priv->txlock); } -static struct net_device_ops moxart_netdev_ops = { +static const struct net_device_ops moxart_netdev_ops = { .ndo_open = moxart_mac_open, .ndo_stop = moxart_mac_stop, .ndo_start_xmit = moxart_mac_start_xmit, diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index e506ca876d0d..b171ed2015fe 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -191,21 +191,6 @@ struct myri10ge_slice_state { int cpu; __be32 __iomem *dca_tag; #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define SLICE_STATE_IDLE 0 -#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */ -#define SLICE_STATE_POLL 2 /* poll owns this slice */ -#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL) -#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */ -#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */ -#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD) - spinlock_t lock; - unsigned long lock_napi_yield; - unsigned long lock_poll_yield; - unsigned long busy_poll_miss; - unsigned long busy_poll_cnt; -#endif /* CONFIG_NET_RX_BUSY_POLL */ char irq_desc[32]; }; @@ -378,8 +363,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p) __raw_writel((__force __u32) val, (__force void __iomem *)p); } -static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats); static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated) { @@ -925,92 +910,6 @@ abort: return status; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) -{ - spin_lock_init(&ss->lock); - ss->state = SLICE_STATE_IDLE; -} - -static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) -{ - bool rc = true; - 
spin_lock(&ss->lock); - if ((ss->state & SLICE_LOCKED)) { - WARN_ON((ss->state & SLICE_STATE_NAPI)); - ss->state |= SLICE_STATE_NAPI_YIELD; - rc = false; - ss->lock_napi_yield++; - } else - ss->state = SLICE_STATE_NAPI; - spin_unlock(&ss->lock); - return rc; -} - -static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) -{ - spin_lock(&ss->lock); - WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD))); - ss->state = SLICE_STATE_IDLE; - spin_unlock(&ss->lock); -} - -static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) -{ - bool rc = true; - spin_lock_bh(&ss->lock); - if ((ss->state & SLICE_LOCKED)) { - ss->state |= SLICE_STATE_POLL_YIELD; - rc = false; - ss->lock_poll_yield++; - } else - ss->state |= SLICE_STATE_POLL; - spin_unlock_bh(&ss->lock); - return rc; -} - -static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) -{ - spin_lock_bh(&ss->lock); - WARN_ON((ss->state & SLICE_STATE_NAPI)); - ss->state = SLICE_STATE_IDLE; - spin_unlock_bh(&ss->lock); -} - -static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) -{ - WARN_ON(!(ss->state & SLICE_LOCKED)); - return (ss->state & SLICE_USER_PEND); -} -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) -{ - return false; -} - -static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) -{ - return false; -} - -static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) -{ - return false; -} -#endif - static int myri10ge_reset(struct myri10ge_priv *mgp) { struct myri10ge_cmd cmd; @@ -1426,7 +1325,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) struct pci_dev *pdev = mgp->pdev; struct net_device *dev = mgp->dev; u8 *va; - bool polling; if (len <= mgp->small_bytes) { rx = &ss->rx_small; @@ -1441,15 +1339,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; prefetch(va); - /* When busy polling in user context, allocate skb and copy headers to - * skb's linear memory ourselves. When not busy polling, use the napi - * gro api. - */ - polling = myri10ge_ss_busy_polling(ss); - if (polling) - skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); - else - skb = napi_get_frags(&ss->napi); + skb = napi_get_frags(&ss->napi); if (unlikely(skb == NULL)) { ss->stats.rx_dropped++; for (i = 0, remainder = len; remainder > 0; i++) { @@ -1489,27 +1379,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) myri10ge_vlan_rx(mgp->dev, va, skb); skb_record_rx_queue(skb, ss - &mgp->ss[0]); - if (polling) { - int hlen; - - /* myri10ge_vlan_rx might have moved the header, so compute - * length and address again. - */ - hlen = MYRI10GE_HLEN > skb->len ? 
skb->len : MYRI10GE_HLEN; - va = page_address(skb_frag_page(&rx_frags[0])) + - rx_frags[0].page_offset; - /* Copy header into the skb linear memory */ - skb_copy_to_linear_data(skb, va, hlen); - rx_frags[0].page_offset += hlen; - rx_frags[0].size -= hlen; - skb->data_len -= hlen; - skb->tail += hlen; - skb->protocol = eth_type_trans(skb, dev); - skb_mark_napi_id(skb, &ss->napi); - netif_receive_skb(skb); - } - else - napi_gro_frags(&ss->napi); + napi_gro_frags(&ss->napi); return 1; } @@ -1669,49 +1539,16 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) if (ss->mgp->dca_enabled) myri10ge_update_dca(ss); #endif - /* Try later if the busy_poll handler is running. */ - if (!myri10ge_ss_lock_napi(ss)) - return budget; - /* process as many rx events as NAPI will allow */ work_done = myri10ge_clean_rx_done(ss, budget); - myri10ge_ss_unlock_napi(ss); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); put_be32(htonl(3), ss->irq_claim); } return work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int myri10ge_busy_poll(struct napi_struct *napi) -{ - struct myri10ge_slice_state *ss = - container_of(napi, struct myri10ge_slice_state, napi); - struct myri10ge_priv *mgp = ss->mgp; - int work_done; - - /* Poll only when the link is up */ - if (mgp->link_state != MXGEFW_LINK_UP) - return LL_FLUSH_FAILED; - - if (!myri10ge_ss_lock_poll(ss)) - return LL_FLUSH_BUSY; - - /* Process a small number of packets */ - work_done = myri10ge_clean_rx_done(ss, 4); - if (work_done) - ss->busy_poll_cnt += work_done; - else - ss->busy_poll_miss++; - - myri10ge_ss_unlock_poll(ss); - - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - static irqreturn_t myri10ge_intr(int irq, void *arg) { struct myri10ge_slice_state *ss = arg; @@ -1773,15 +1610,16 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) } static int -myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +myri10ge_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct myri10ge_priv *mgp = netdev_priv(netdev); char *ptr; int i; - cmd->autoneg = AUTONEG_DISABLE; - ethtool_cmd_speed_set(cmd, SPEED_10000); - cmd->duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; /* * parse the product code to deterimine the interface type @@ -1806,16 +1644,12 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) ptr++; if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') { /* We've found either an XFP, quad ribbon fiber, or SFP+ */ - cmd->port = PORT_FIBRE; - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); } else { - cmd->port = PORT_OTHER; + cmd->base.port = PORT_OTHER; } - if (*ptr == 'R' || *ptr == 'S') - cmd->transceiver = XCVR_EXTERNAL; - else - cmd->transceiver = XCVR_INTERNAL; return 0; } @@ -1919,10 +1753,6 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", "wake_queue", "stop_queue", "tx_linearized", -#ifdef CONFIG_NET_RX_BUSY_POLL - "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss", - "rx_busy_poll_cnt", -#endif }; #define MYRI10GE_NET_STATS_LEN 21 @@ -2022,12 +1852,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned 
int)ss->tx.wake_queue; data[i++] = (unsigned int)ss->tx.stop_queue; data[i++] = (unsigned int)ss->tx.linearized; -#ifdef CONFIG_NET_RX_BUSY_POLL - data[i++] = ss->lock_napi_yield; - data[i++] = ss->lock_poll_yield; - data[i++] = ss->busy_poll_miss; - data[i++] = ss->busy_poll_cnt; -#endif } } @@ -2098,7 +1922,6 @@ myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) } static const struct ethtool_ops myri10ge_ethtool_ops = { - .get_settings = myri10ge_get_settings, .get_drvinfo = myri10ge_get_drvinfo, .get_coalesce = myri10ge_get_coalesce, .set_coalesce = myri10ge_set_coalesce, @@ -2112,6 +1935,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { .set_msglevel = myri10ge_set_msglevel, .get_msglevel = myri10ge_get_msglevel, .set_phys_id = myri10ge_phys_id, + .get_link_ksettings = myri10ge_get_link_ksettings, }; static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) @@ -2589,9 +2413,6 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } - /* Initialize the slice spinlock and state used for polling */ - myri10ge_ss_init_lock(ss); - /* must happen prior to any irq */ napi_enable(&(ss)->napi); } @@ -2668,19 +2489,9 @@ static int myri10ge_close(struct net_device *dev) del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - for (i = 0; i < mgp->num_slices; i++) { + for (i = 0; i < mgp->num_slices; i++) napi_disable(&mgp->ss[i].napi); - local_bh_disable(); /* myri10ge_ss_lock_napi needs this */ - /* Lock the slice to prevent the busy_poll handler from - * accessing it. Later when we bring the NIC up, myri10ge_open - * resets the slice including this lock. - */ - while (!myri10ge_ss_lock_napi(&mgp->ss[i])) { - pr_info("Slice %d locked\n", i); - mdelay(1); - } - local_bh_enable(); - } + netif_carrier_off(dev); netif_tx_stop_all_queues(dev); @@ -3119,8 +2930,8 @@ drop: return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { const struct myri10ge_priv *mgp = netdev_priv(dev); const struct myri10ge_slice_netstats *slice_stats; @@ -3135,7 +2946,6 @@ static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, stats->rx_dropped += slice_stats->rx_dropped; stats->tx_dropped += slice_stats->tx_dropped; } - return stats; } static void myri10ge_set_multicast_list(struct net_device *dev) @@ -3954,9 +3764,6 @@ static const struct net_device_ops myri10ge_netdev_ops = { .ndo_change_mtu = myri10ge_change_mtu, .ndo_set_rx_mode = myri10ge_set_multicast_list, .ndo_set_mac_address = myri10ge_set_mac_address, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = myri10ge_busy_poll, -#endif }; static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 90eac63f9606..18af2a23a933 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -640,8 +640,10 @@ static int netdev_set_wol(struct net_device *dev, u32 newval); static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur); static int netdev_set_sopass(struct net_device *dev, u8 *newval); static int netdev_get_sopass(struct net_device *dev, u8 *data); -static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd); -static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd); +static int 
netdev_get_ecmd(struct net_device *dev, + struct ethtool_link_ksettings *ecmd); +static int netdev_set_ecmd(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd); static void enable_wol_mode(struct net_device *dev, int enable_intr); static int netdev_close(struct net_device *dev); static int netdev_get_regs(struct net_device *dev, u8 *buf); @@ -2265,7 +2267,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget) np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); - napi_complete(napi); + napi_complete_done(napi, work_done); /* Reenable interrupts providing nothing is trying to shut * the chip down. */ @@ -2584,7 +2586,8 @@ static int get_eeprom_len(struct net_device *dev) return np->eeprom_size; } -static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); @@ -2593,7 +2596,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return 0; } -static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); int res; @@ -2689,8 +2693,6 @@ static const struct ethtool_ops ethtool_ops = { .get_drvinfo = get_drvinfo, .get_regs_len = get_regs_len, .get_eeprom_len = get_eeprom_len, - .get_settings = get_settings, - .set_settings = set_settings, .get_wol = get_wol, .set_wol = set_wol, .get_regs = get_regs, @@ -2699,6 +2701,8 @@ static const struct ethtool_ops ethtool_ops = { .nway_reset = nway_reset, .get_link = get_link, .get_eeprom = get_eeprom, + .get_link_ksettings = get_link_ksettings, + .set_link_ksettings = set_link_ksettings, }; static int netdev_set_wol(struct net_device *dev, u32 newval) @@ -2828,29 +2832,32 @@ static int netdev_get_sopass(struct net_device *dev, u8 *data) return 0; } -static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +static int netdev_get_ecmd(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); + u32 supported, advertising; u32 tmp; - ecmd->port = dev->if_port; - ethtool_cmd_speed_set(ecmd, np->speed); - ecmd->duplex = np->duplex; - ecmd->autoneg = np->autoneg; - ecmd->advertising = 0; + ecmd->base.port = dev->if_port; + ecmd->base.speed = np->speed; + ecmd->base.duplex = np->duplex; + ecmd->base.autoneg = np->autoneg; + advertising = 0; + if (np->advertising & ADVERTISE_10HALF) - ecmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if (np->advertising & ADVERTISE_10FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (np->advertising & ADVERTISE_100HALF) - ecmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if (np->advertising & ADVERTISE_100FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - ecmd->supported = (SUPPORTED_Autoneg | + advertising |= ADVERTISED_100baseT_Full; + supported = (SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE); - ecmd->phy_address = np->phy_addr_external; + ecmd->base.phy_address = np->phy_addr_external; /* * We intentionally report the phy address of the external * phy, even if the internal phy is used. 
This is necessary @@ -2870,62 +2877,70 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) */ /* set information based on active port type */ - switch (ecmd->port) { + switch (ecmd->base.port) { default: case PORT_TP: - ecmd->advertising |= ADVERTISED_TP; - ecmd->transceiver = XCVR_INTERNAL; + advertising |= ADVERTISED_TP; break; case PORT_MII: - ecmd->advertising |= ADVERTISED_MII; - ecmd->transceiver = XCVR_EXTERNAL; + advertising |= ADVERTISED_MII; break; case PORT_FIBRE: - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + advertising |= ADVERTISED_FIBRE; break; } /* if autonegotiation is on, try to return the active speed/duplex */ - if (ecmd->autoneg == AUTONEG_ENABLE) { - ecmd->advertising |= ADVERTISED_Autoneg; + if (ecmd->base.autoneg == AUTONEG_ENABLE) { + advertising |= ADVERTISED_Autoneg; tmp = mii_nway_result( np->advertising & mdio_read(dev, MII_LPA)); if (tmp == LPA_100FULL || tmp == LPA_100HALF) - ethtool_cmd_speed_set(ecmd, SPEED_100); + ecmd->base.speed = SPEED_100; else - ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->base.speed = SPEED_10; if (tmp == LPA_100FULL || tmp == LPA_10FULL) - ecmd->duplex = DUPLEX_FULL; + ecmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + ecmd->base.duplex = DUPLEX_HALF; } /* ignore maxtxpkt, maxrxpkt for now */ + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); + return 0; } -static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +static int netdev_set_ecmd(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); + u32 advertising; - if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE) - return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + ecmd->link_modes.advertising); + + if (ecmd->base.port != PORT_TP && + ecmd->base.port != PORT_MII && + ecmd->base.port != PORT_FIBRE) return -EINVAL; - if (ecmd->autoneg == AUTONEG_ENABLE) { - if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + if (ecmd->base.autoneg == AUTONEG_ENABLE) { + if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full)) == 0) { return -EINVAL; } - } else if (ecmd->autoneg == AUTONEG_DISABLE) { - u32 speed = ethtool_cmd_speed(ecmd); + } else if (ecmd->base.autoneg == AUTONEG_DISABLE) { + u32 speed = ecmd->base.speed; if (speed != SPEED_10 && speed != SPEED_100) return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + if (ecmd->base.duplex != DUPLEX_HALF && + ecmd->base.duplex != DUPLEX_FULL) return -EINVAL; } else { return -EINVAL; @@ -2936,8 +2951,8 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) * transceiver are really not going to work so don't let the * user select them. */ - if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE || - ecmd->port == PORT_TP)) + if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE || + ecmd->base.port == PORT_TP)) return -EINVAL; /* @@ -2956,30 +2971,30 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) /* WHEW! 
now lets bang some bits */ /* save the parms */ - dev->if_port = ecmd->port; - np->autoneg = ecmd->autoneg; - np->phy_addr_external = ecmd->phy_address & PhyAddrMask; + dev->if_port = ecmd->base.port; + np->autoneg = ecmd->base.autoneg; + np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask; if (np->autoneg == AUTONEG_ENABLE) { /* advertise only what has been requested */ np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); - if (ecmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) np->advertising |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) np->advertising |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) np->advertising |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) np->advertising |= ADVERTISE_100FULL; } else { - np->speed = ethtool_cmd_speed(ecmd); - np->duplex = ecmd->duplex; + np->speed = ecmd->base.speed; + np->duplex = ecmd->base.duplex; /* user overriding the initial full duplex parm? */ if (np->duplex == DUPLEX_HALF) np->full_duplex = 0; } /* get the right phy enabled */ - if (ecmd->port == PORT_TP) + if (ecmd->base.port == PORT_TP) switch_port_internal(dev); else switch_port_external(dev); diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index f9d2eb9a920a..729095db3e08 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1217,12 +1217,13 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) } /* Let ethtool retrieve info */ -static int ns83820_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int ns83820_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ns83820 *dev = PRIV(ndev); u32 cfg, tanar, tbicr; int fullduplex = 0; + u32 supported; /* * Here's the list of available ethtool commands from other drivers: @@ -1244,44 +1245,47 @@ static int ns83820_get_settings(struct net_device *ndev, fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0; - cmd->supported = SUPPORTED_Autoneg; + supported = SUPPORTED_Autoneg; if (dev->CFG_cache & CFG_TBI_EN) { /* we have optical interface */ - cmd->supported |= SUPPORTED_1000baseT_Half | + supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; - cmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; } else { /* we have copper */ - cmd->supported |= SUPPORTED_10baseT_Half | + supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_MII; - cmd->port = PORT_MII; + cmd->base.port = PORT_MII; } - cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + + cmd->base.duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; switch (cfg / CFG_SPDSTS0 & 3) { case 2: - ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->base.speed = SPEED_1000; break; case 1: - ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->base.speed = SPEED_100; break; default: - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; break; } - cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) + cmd->base.autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } /* Let ethool change settings*/ -static int ns83820_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int ns83820_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct ns83820 *dev = PRIV(ndev); u32 cfg, tanar; @@ -1306,10 +1310,10 @@ static int ns83820_set_settings(struct net_device *ndev, spin_lock(&dev->tx_lock); /* Set duplex */ - if (cmd->duplex != fullduplex) { + if (cmd->base.duplex != fullduplex) { if (have_optical) { /*set full duplex*/ - if (cmd->duplex == DUPLEX_FULL) { + if (cmd->base.duplex == DUPLEX_FULL) { /* force full duplex */ writel(readl(dev->base + TXCFG) | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, @@ -1333,7 +1337,7 @@ static int ns83820_set_settings(struct net_device *ndev, /* Set autonegotiation */ if (1) { - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { /* restart auto negotiation */ writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, dev->base + TBICR); @@ -1348,7 +1352,7 @@ static int ns83820_set_settings(struct net_device *ndev, } printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name, - cmd->autoneg ? "ENABLED" : "DISABLED"); + cmd->base.autoneg ? "ENABLED" : "DISABLED"); } phy_intr(ndev); @@ -1375,10 +1379,10 @@ static u32 ns83820_get_link(struct net_device *ndev) } static const struct ethtool_ops ops = { - .get_settings = ns83820_get_settings, - .set_settings = ns83820_set_settings, .get_drvinfo = ns83820_get_drvinfo, - .get_link = ns83820_get_link + .get_link = ns83820_get_link, + .get_link_ksettings = ns83820_get_link_ksettings, + .set_link_ksettings = ns83820_set_link_ksettings, }; static inline void ns83820_disable_interrupts(struct ns83820 *dev) diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 564f682fa4dc..118723ea681a 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) s2io_chk_rx_buffers(nic, ring); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /*Re Enable MSI-Rx Vector*/ addr = (u8 __iomem *)&bar0->xmsi_mask_reg; addr += 7 - ring->ring_no; @@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) break; } if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ writeq(0, &bar0->rx_traffic_mask); readl(&bar0->rx_traffic_mask); @@ -5300,10 +5300,10 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) } /** - * s2io_ethtool_sset - Sets different link parameters. + * s2io_ethtool_set_link_ksettings - Sets different link parameters. * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. - * @info: pointer to the structure with parameters given by ethtool to set + * @cmd: pointer to the structure with parameters given by ethtool to set * link information. * Description: * The function sets different link parameters provided by the user onto @@ -5312,13 +5312,14 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) * 0 on success. 
*/ -static int s2io_ethtool_sset(struct net_device *dev, - struct ethtool_cmd *info) +static int +s2io_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct s2io_nic *sp = netdev_priv(dev); - if ((info->autoneg == AUTONEG_ENABLE) || - (ethtool_cmd_speed(info) != SPEED_10000) || - (info->duplex != DUPLEX_FULL)) + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (cmd->base.speed != SPEED_10000) || + (cmd->base.duplex != DUPLEX_FULL)) return -EINVAL; else { s2io_close(sp->dev); @@ -5329,10 +5330,10 @@ static int s2io_ethtool_sset(struct net_device *dev, } /** - * s2io_ethtol_gset - Return link specific information. + * s2io_ethtol_get_link_ksettings - Return link specific information. * @sp : private member of the device structure, pointer to the * s2io_nic structure. - * @info : pointer to the structure with parameters given by ethtool + * @cmd : pointer to the structure with parameters given by ethtool * to return link information. * Description: * Returns link specific information like speed, duplex etc.. to ethtool. @@ -5340,25 +5341,31 @@ static int s2io_ethtool_sset(struct net_device *dev, * return 0 on success. */ -static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) +static int +s2io_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct s2io_nic *sp = netdev_priv(dev); - info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - info->port = PORT_FIBRE; - /* info->transceiver */ - info->transceiver = XCVR_EXTERNAL; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + cmd->base.port = PORT_FIBRE; if (netif_carrier_ok(sp->dev)) { - ethtool_cmd_speed_set(info, SPEED_10000); - info->duplex = DUPLEX_FULL; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(info, SPEED_UNKNOWN); - info->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - info->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -5390,7 +5397,7 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, * s2io_nic structure. * @regs : pointer to the structure with parameters given by ethtool for * dumping the registers. - * @reg_space: The input argumnet into which all the registers are dumped. + * @reg_space: The input argument into which all the registers are dumped. * Description: * Dumps the entire register space of xFrame NIC into the user given * buffer area. 
@@ -6626,8 +6633,6 @@ static int s2io_set_features(struct net_device *dev, netdev_features_t features) } static const struct ethtool_ops netdev_ethtool_ops = { - .get_settings = s2io_ethtool_gset, - .set_settings = s2io_ethtool_sset, .get_drvinfo = s2io_ethtool_gdrvinfo, .get_regs_len = s2io_ethtool_get_regs_len, .get_regs = s2io_ethtool_gregs, @@ -6643,6 +6648,8 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_phys_id = s2io_ethtool_set_led, .get_ethtool_stats = s2io_get_ethtool_stats, .get_sset_count = s2io_get_sset_count, + .get_link_ksettings = s2io_ethtool_get_link_ksettings, + .set_link_ksettings = s2io_ethtool_set_link_ksettings, }; /** diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index 9a2967016c18..0452848d1316 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -38,9 +38,9 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { }; /** - * vxge_ethtool_sset - Sets different link parameters. + * vxge_ethtool_set_link_ksettings - Sets different link parameters. * @dev: device pointer. - * @info: pointer to the structure with parameters given by ethtool to set + * @cmd: pointer to the structure with parameters given by ethtool to set * link information. * * The function sets different link parameters provided by the user onto @@ -48,44 +48,51 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { * Return value: * 0 on success. */ -static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) +static int +vxge_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { /* We currently only support 10Gb/FULL */ - if ((info->autoneg == AUTONEG_ENABLE) || - (ethtool_cmd_speed(info) != SPEED_10000) || - (info->duplex != DUPLEX_FULL)) + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (cmd->base.speed != SPEED_10000) || + (cmd->base.duplex != DUPLEX_FULL)) return -EINVAL; return 0; } /** - * vxge_ethtool_gset - Return link specific information. + * vxge_ethtool_get_link_ksettings - Return link specific information. * @dev: device pointer. - * @info: pointer to the structure with parameters given by ethtool + * @cmd: pointer to the structure with parameters given by ethtool * to return link information. * * Returns link specific information like speed, duplex etc.. to ethtool. * Return value : * return 0 on success. 
*/ -static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) +static int vxge_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - info->port = PORT_FIBRE; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - info->transceiver = XCVR_EXTERNAL; + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + cmd->base.port = PORT_FIBRE; if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(info, SPEED_10000); - info->duplex = DUPLEX_FULL; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(info, SPEED_UNKNOWN); - info->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - info->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -112,7 +119,7 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev, * @dev: device pointer. * @regs: pointer to the structure with parameters given by ethtool for * dumping the registers. - * @reg_space: The input argumnet into which all the registers are dumped. + * @reg_space: The input argument into which all the registers are dumped. * * Dumps the vpath register space of Titan NIC into the user given * buffer area. @@ -1126,8 +1133,6 @@ static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms) } static const struct ethtool_ops vxge_ethtool_ops = { - .get_settings = vxge_ethtool_gset, - .set_settings = vxge_ethtool_sset, .get_drvinfo = vxge_ethtool_gdrvinfo, .get_regs_len = vxge_ethtool_get_regs_len, .get_regs = vxge_ethtool_gregs, @@ -1139,6 +1144,8 @@ static const struct ethtool_ops vxge_ethtool_ops = { .get_sset_count = vxge_ethtool_get_sset_count, .get_ethtool_stats = vxge_get_ethtool_stats, .flash_device = vxge_fw_flash, + .get_link_ksettings = vxge_ethtool_get_link_ksettings, + .set_link_ksettings = vxge_ethtool_set_link_ksettings, }; void vxge_initialize_ethtool_ops(struct net_device *ndev) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index e07b936f64ec..6a4310af5d97 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget) vxge_hw_vpath_poll_rx(ring->handle); pkts_processed = ring->pkts_processed; - if (ring->pkts_processed < budget_org) { - napi_complete(napi); + if (pkts_processed < budget_org) { + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the vpath */ vxge_hw_channel_msix_unmask( @@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) VXGE_COMPLETE_ALL_TX(vdev); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ vxge_hw_device_unmask_all(hldev); vxge_hw_device_flush_io(hldev); @@ -3111,7 +3111,7 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu) * @stats: pointer to struct rtnl_link_stats64 * */ -static struct rtnl_link_stats64 * +static void 
vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct vxgedev *vdev = netdev_priv(dev); @@ -3150,8 +3150,6 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) net_stats->tx_bytes += bytes; net_stats->tx_errors += txstats->tx_errors; } - - return net_stats; } static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh) diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index 9508ad782c30..967d7ca8c28c 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -15,21 +15,21 @@ config NET_VENDOR_NETRONOME if NET_VENDOR_NETRONOME -config NFP_NETVF - tristate "Netronome(R) NFP4000/NFP6000 VF NIC driver" +config NFP + tristate "Netronome(R) NFP4000/NFP6000 NIC driver" depends on PCI && PCI_MSI depends on VXLAN || VXLAN=n ---help--- - This driver supports SR-IOV virtual functions of - the Netronome(R) NFP4000/NFP6000 cards working as - a advanced Ethernet NIC. + This driver supports the Netronome(R) NFP4000/NFP6000 based + cards working as a advanced Ethernet NIC. It works with both + SR-IOV physical and virtual functions. -config NFP_NET_DEBUG - bool "Debug support for Netronome(R) NFP3200/NFP6000 NIC drivers" - depends on NFP_NET || NFP_NETVF +config NFP_DEBUG + bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers" + depends on NFP ---help--- Enable extra sanity checks and debugfs support in - Netronome(R) NFP3200/NFP6000 NIC PF and VF drivers. + Netronome(R) NFP4000/NFP6000 NIC drivers. Note: selecting this option may adversely impact performance. diff --git a/drivers/net/ethernet/netronome/Makefile b/drivers/net/ethernet/netronome/Makefile index dcb7b383f634..7fb3b84b5556 100644 --- a/drivers/net/ethernet/netronome/Makefile +++ b/drivers/net/ethernet/netronome/Makefile @@ -2,4 +2,4 @@ # Makefile for the Netronome network device drivers # -obj-$(CONFIG_NFP_NETVF) += nfp/ +obj-$(CONFIG_NFP) += nfp/ diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 0efb2ba9a558..6933afa69df2 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -1,15 +1,28 @@ -obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o +obj-$(CONFIG_NFP) += nfp.o -nfp_netvf-objs := \ +nfp-objs := \ + nfpcore/nfp6000_pcie.o \ + nfpcore/nfp_cppcore.o \ + nfpcore/nfp_cpplib.o \ + nfpcore/nfp_hwinfo.o \ + nfpcore/nfp_mip.o \ + nfpcore/nfp_nffw.o \ + nfpcore/nfp_nsp.o \ + nfpcore/nfp_nsp_eth.o \ + nfpcore/nfp_resource.o \ + nfpcore/nfp_rtsym.o \ + nfpcore/nfp_target.o \ + nfp_main.o \ nfp_net_common.o \ nfp_net_ethtool.o \ nfp_net_offload.o \ + nfp_net_main.o \ nfp_netvf_main.o ifeq ($(CONFIG_BPF_SYSCALL),y) -nfp_netvf-objs += \ +nfp-objs += \ nfp_bpf_verifier.o \ nfp_bpf_jit.o endif -nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o +nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h index 76a19f1796af..9513c80f7be5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h @@ -39,8 +39,6 @@ #include <linux/list.h> #include <linux/types.h> -#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask))) - /* For branch fixup logic use up-most byte of branch instruction as scratch * area. Remember to clear this before sending instructions to HW! 
*/ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c new file mode 100644 index 000000000000..dedac720fb29 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -0,0 +1,460 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_main.c + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Alejandro Lucero <alejandro.lucero@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/firmware.h> +#include <linux/vermagic.h> + +#include "nfpcore/nfp.h" +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp_nsp_eth.h" + +#include "nfpcore/nfp6000_pcie.h" + +#include "nfp_main.h" +#include "nfp_net.h" + +static const char nfp_driver_name[] = "nfp"; +const char nfp_driver_version[] = VERMAGIC_STRING; + +static const struct pci_device_id nfp_pci_device_ids[] = { + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, + { 0, } /* Required last entry. 
*/ +}; +MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); + +static void nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) +{ +#ifdef CONFIG_PCI_IOV + int err; + + pf->limit_vfs = nfp_rtsym_read_le(pf->cpp, "nfd_vf_cfg_max_vfs", &err); + if (!err) + return; + + pf->limit_vfs = ~0; + /* Allow any setting for backwards compatibility if symbol not found */ + if (err != -ENOENT) + nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); +#endif +} + +static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct nfp_pf *pf = pci_get_drvdata(pdev); + int err; + + if (num_vfs > pf->limit_vfs) { + nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", + pf->limit_vfs); + return -EINVAL; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err); + return err; + } + + pf->num_vfs = num_vfs; + + dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs); + + return num_vfs; +#endif + return 0; +} + +static int nfp_pcie_sriov_disable(struct pci_dev *pdev) +{ +#ifdef CONFIG_PCI_IOV + struct nfp_pf *pf = pci_get_drvdata(pdev); + + /* If the VFs are assigned we cannot shut down SR-IOV without + * causing issues, so just leave the hardware available but + * disabled + */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + pf->num_vfs = 0; + + pci_disable_sriov(pdev); + dev_dbg(&pdev->dev, "Removed VFs.\n"); +#endif + return 0; +} + +static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs == 0) + return nfp_pcie_sriov_disable(pdev); + else + return nfp_pcie_sriov_enable(pdev, num_vfs); +} + +/** + * nfp_net_fw_find() - Find the correct firmware image for netdev mode + * @pdev: PCI Device structure + * @pf: NFP PF Device structure + * + * Return: firmware if found and requested successfully. 
+ */ +static const struct firmware * +nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) +{ + const struct firmware *fw = NULL; + struct nfp_eth_table_port *port; + const char *fw_model; + char fw_name[256]; + int spc, err = 0; + int i, j; + + if (!pf->eth_tbl) { + dev_err(&pdev->dev, "Error: can't identify media config\n"); + return NULL; + } + + fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno"); + if (!fw_model) { + dev_err(&pdev->dev, "Error: can't read part number\n"); + return NULL; + } + + spc = ARRAY_SIZE(fw_name); + spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model); + + for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) { + port = &pf->eth_tbl->ports[i]; + j = 1; + while (i + j < pf->eth_tbl->count && + port->speed == port[j].speed) + j++; + + spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, + "_%dx%d", j, port->speed / 1000); + } + + if (spc <= 0) + return NULL; + + spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw"); + if (spc <= 0) + return NULL; + + err = request_firmware(&fw, fw_name, &pdev->dev); + if (err) + return NULL; + + dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name); + + return fw; +} + +/** + * nfp_net_fw_load() - Load the firmware image + * @pdev: PCI Device structure + * @pf: NFP PF Device structure + * @nsp: NFP SP handle + * + * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded + */ +static int +nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) +{ + const struct firmware *fw; + u16 interface; + int err; + + interface = nfp_cpp_interface(pf->cpp); + if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) { + /* Only Unit 0 should reset or load firmware */ + dev_info(&pdev->dev, "Firmware will be loaded by partner\n"); + return 0; + } + + fw = nfp_net_fw_find(pdev, pf); + if (!fw) + return 0; + + dev_info(&pdev->dev, "Soft-reset, loading FW image\n"); + err = nfp_nsp_device_soft_reset(nsp); + if (err < 0) { + dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n", + err); + goto exit_release_fw; + } + + err = nfp_nsp_load_fw(nsp, fw); + + if (err < 0) { + dev_err(&pdev->dev, "FW loading failed: %d\n", err); + goto exit_release_fw; + } + + dev_info(&pdev->dev, "Finished loading FW image\n"); + +exit_release_fw: + release_firmware(fw); + + return err < 0 ? 
err : 1; +} + +static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + dev_err(&pdev->dev, "Failed to access the NSP: %d\n", err); + return err; + } + + err = nfp_nsp_wait(nsp); + if (err < 0) + goto exit_close_nsp; + + pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + + err = nfp_fw_load(pdev, pf, nsp); + if (err < 0) { + kfree(pf->eth_tbl); + dev_err(&pdev->dev, "Failed to load FW\n"); + goto exit_close_nsp; + } + + pf->fw_loaded = !!err; + err = 0; + +exit_close_nsp: + nfp_nsp_close(nsp); + + return err; +} + +static void nfp_fw_unload(struct nfp_pf *pf) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + nfp_err(pf->cpp, "Reset failed, can't open NSP\n"); + return; + } + + err = nfp_nsp_device_soft_reset(nsp); + if (err < 0) + dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err); + else + dev_info(&pf->pdev->dev, "Firmware safely unloaded\n"); + + nfp_nsp_close(nsp); +} + +static int nfp_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct nfp_pf *pf; + int err; + + err = pci_enable_device(pdev); + if (err < 0) + return err; + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS)); + if (err) + goto err_pci_disable; + + err = pci_request_regions(pdev, nfp_driver_name); + if (err < 0) { + dev_err(&pdev->dev, "Unable to reserve pci resources.\n"); + goto err_pci_disable; + } + + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_rel_regions; + } + INIT_LIST_HEAD(&pf->ports); + pci_set_drvdata(pdev, pf); + pf->pdev = pdev; + + pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev); + if (IS_ERR_OR_NULL(pf->cpp)) { + err = PTR_ERR(pf->cpp); + if (err >= 0) + err = -ENOMEM; + goto err_disable_msix; + } + + dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n", + nfp_hwinfo_lookup(pf->cpp, "assembly.vendor"), + nfp_hwinfo_lookup(pf->cpp, "assembly.partno"), + nfp_hwinfo_lookup(pf->cpp, "assembly.serial"), + nfp_hwinfo_lookup(pf->cpp, "assembly.revision"), + nfp_hwinfo_lookup(pf->cpp, "cpld.version")); + + err = nfp_nsp_init(pdev, pf); + if (err) + goto err_cpp_free; + + nfp_pcie_sriov_read_nfd_limit(pf); + + err = nfp_net_pci_probe(pf); + if (err) + goto err_fw_unload; + + return 0; + +err_fw_unload: + if (pf->fw_loaded) + nfp_fw_unload(pf); + kfree(pf->eth_tbl); +err_cpp_free: + nfp_cpp_free(pf->cpp); +err_disable_msix: + pci_set_drvdata(pdev, NULL); + kfree(pf); +err_rel_regions: + pci_release_regions(pdev); +err_pci_disable: + pci_disable_device(pdev); + + return err; +} + +static void nfp_pci_remove(struct pci_dev *pdev) +{ + struct nfp_pf *pf = pci_get_drvdata(pdev); + + if (!list_empty(&pf->ports)) + nfp_net_pci_remove(pf); + + nfp_pcie_sriov_disable(pdev); + + if (pf->fw_loaded) + nfp_fw_unload(pf); + + pci_set_drvdata(pdev, NULL); + nfp_cpp_free(pf->cpp); + + kfree(pf->eth_tbl); + kfree(pf); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver nfp_pci_driver = { + .name = nfp_driver_name, + .id_table = nfp_pci_device_ids, + .probe = nfp_pci_probe, + .remove = nfp_pci_remove, + .sriov_configure = nfp_pcie_sriov_configure, +}; + +static int __init nfp_main_init(void) +{ + int err; + + pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n", + nfp_driver_name); + + nfp_net_debugfs_create(); + + err = pci_register_driver(&nfp_pci_driver); + if (err < 0) + goto 
err_destroy_debugfs; + + err = pci_register_driver(&nfp_netvf_pci_driver); + if (err) + goto err_unreg_pf; + + return err; + +err_unreg_pf: + pci_unregister_driver(&nfp_pci_driver); +err_destroy_debugfs: + nfp_net_debugfs_destroy(); + return err; +} + +static void __exit nfp_main_exit(void) +{ + pci_unregister_driver(&nfp_netvf_pci_driver); + pci_unregister_driver(&nfp_pci_driver); + nfp_net_debugfs_destroy(); +} + +module_init(nfp_main_init); +module_exit(nfp_main_exit); + +MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); + +MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver."); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h new file mode 100644 index 000000000000..39105d0435e9 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_main.h + * Author: Jason McMullan <jason.mcmullan@netronome.com> + */ + +#ifndef NFP_MAIN_H +#define NFP_MAIN_H + +#include <linux/list.h> +#include <linux/types.h> +#include <linux/msi.h> +#include <linux/pci.h> + +struct dentry; +struct pci_dev; + +struct nfp_cpp; +struct nfp_cpp_area; +struct nfp_eth_table; + +/** + * struct nfp_pf - NFP PF-specific device structure + * @pdev: Backpointer to PCI device + * @cpp: Pointer to the CPP handle + * @ctrl_area: Pointer to the CPP area for the control BAR + * @tx_area: Pointer to the CPP area for the TX queues + * @rx_area: Pointer to the CPP area for the FL/RX queues + * @irq_entries: Array of MSI-X entries for all ports + * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit) + * @num_vfs: Number of SR-IOV VFs enabled + * @fw_loaded: Is the firmware loaded? + * @eth_tbl: NSP ETH table + * @ddir: Per-device debugfs directory + * @num_ports: Number of adapter ports + * @ports: Linked list of port structures (struct nfp_net) + */ +struct nfp_pf { + struct pci_dev *pdev; + + struct nfp_cpp *cpp; + + struct nfp_cpp_area *ctrl_area; + struct nfp_cpp_area *tx_area; + struct nfp_cpp_area *rx_area; + + struct msix_entry *irq_entries; + + unsigned int limit_vfs; + unsigned int num_vfs; + + bool fw_loaded; + + struct nfp_eth_table *eth_tbl; + + struct dentry *ddir; + + unsigned int num_ports; + struct list_head ports; +}; + +extern struct pci_driver nfp_netvf_pci_driver; + +int nfp_net_pci_probe(struct nfp_pf *pf); +void nfp_net_pci_remove(struct nfp_pf *pf); + +#endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 2115f446031e..e614a376b595 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -43,6 +43,7 @@ #define _NFP_NET_H_ #include <linux/interrupt.h> +#include <linux/list.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/io-64-nonatomic-hi-lo.h> @@ -83,6 +84,7 @@ #define NFP_NET_NON_Q_VECTORS 2 #define NFP_NET_IRQ_LSC_IDX 0 #define NFP_NET_IRQ_EXN_IDX 1 +#define NFP_NET_MIN_PORT_IRQS (NFP_NET_NON_Q_VECTORS + 1) /* Queue/Ring definitions */ #define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */ @@ -109,6 +111,7 @@ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) /* Forward declarations */ +struct nfp_cpp; struct nfp_net; struct nfp_net_r_vector; @@ -345,7 +348,7 @@ struct nfp_net_rx_ring { * @tx_ring: Pointer to TX ring * @rx_ring: Pointer to RX ring * @xdp_ring: Pointer to an extra TX ring for XDP - * @irq_idx: Index into MSI-X table + * @irq_entry: MSI-X table entry (use for talking to the device) * @rx_sync: Seqlock for atomic updates of RX stats * @rx_pkts: Number of received packets * @rx_bytes: Number of received bytes @@ -362,6 +365,7 @@ struct nfp_net_rx_ring { * @tx_lso: Counter of LSO packets sent * @tx_errors: How many TX errors were encountered * @tx_busy: How often was TX busy (no space)? 
+ * @irq_vector: Interrupt vector number (use for talking to the OS) * @handler: Interrupt handler for this ring vector * @name: Name of the interrupt vector * @affinity_mask: SMP affinity mask for this vector @@ -378,7 +382,7 @@ struct nfp_net_r_vector { struct nfp_net_tx_ring *tx_ring; struct nfp_net_rx_ring *rx_ring; - int irq_idx; + u16 irq_entry; struct u64_stats_sync rx_sync; u64 rx_pkts; @@ -400,6 +404,7 @@ struct nfp_net_r_vector { u64 tx_errors; u64 tx_busy; + u32 irq_vector; irq_handler_t handler; char name[IFNAMSIZ + 8]; cpumask_t affinity_mask; @@ -431,20 +436,13 @@ struct nfp_stat_pair { * struct nfp_net - NFP network device structure * @pdev: Backpointer to PCI device * @netdev: Backpointer to net_device structure - * @nfp_fallback: Is the driver used in fallback mode? * @is_vf: Is the driver attached to a VF? - * @fw_loaded: Is the firmware loaded? * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_xdp: Offloaded BPF program is XDP * @ctrl: Local copy of the control register/word. * @fl_bufsz: Currently configured size of the freelist buffers * @rx_offset: Offset in the RX buffers where packet data starts * @xdp_prog: Installed XDP program - * @cpp: Pointer to the CPP handle - * @nfp_dev_cpp: Pointer to the NFP Device handle - * @ctrl_area: Pointer to the CPP area for the control BAR - * @tx_area: Pointer to the CPP area for the TX queues - * @rx_area: Pointer to the CPP area for the FL/RX queues * @fw_ver: Firmware version * @cap: Capabilities advertised by the Firmware * @max_mtu: Maximum support MTU advertised by the Firmware @@ -494,14 +492,15 @@ struct nfp_stat_pair { * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs + * @ethtool_dump_flag: Ethtool dump flag + * @port_list: Entry on device port list + * @cpp: CPP device handle if available */ struct nfp_net { struct pci_dev *pdev; struct net_device *netdev; - unsigned nfp_fallback:1; unsigned is_vf:1; - unsigned fw_loaded:1; unsigned bpf_offload_skip_sw:1; unsigned bpf_offload_xdp:1; @@ -515,18 +514,6 @@ struct nfp_net { struct nfp_net_tx_ring *tx_rings; struct nfp_net_rx_ring *rx_rings; -#ifdef CONFIG_PCI_IOV - unsigned int num_vfs; - struct vf_data_storage *vfinfo; - int vf_rate_link_speed; -#endif - - struct nfp_cpp *cpp; - struct platform_device *nfp_dev_cpp; - struct nfp_cpp_area *ctrl_area; - struct nfp_cpp_area *tx_area; - struct nfp_cpp_area *rx_area; - struct nfp_net_fw_version fw_ver; u32 cap; u32 max_mtu; @@ -589,11 +576,15 @@ struct nfp_net { u8 __iomem *qcp_cfg; u8 __iomem *ctrl_bar; - u8 __iomem *q_bar; u8 __iomem *tx_bar; u8 __iomem *rx_bar; struct dentry *debugfs_dir; + u32 ethtool_dump_flag; + + struct list_head port_list; + + struct nfp_cpp *cpp; }; struct nfp_net_ring_set { @@ -770,8 +761,7 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q) } /* Globals */ -extern const char nfp_net_driver_name[]; -extern const char nfp_net_driver_version[]; +extern const char nfp_driver_version[]; /* Prototypes */ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, @@ -789,17 +779,24 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update); void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); -int nfp_net_irqs_alloc(struct nfp_net *nn); -void nfp_net_irqs_disable(struct nfp_net *nn); + +unsigned int +nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, + unsigned int 
min_irqs, unsigned int want_irqs); +void nfp_net_irqs_disable(struct pci_dev *pdev); +void +nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, + unsigned int n); int nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); -#ifdef CONFIG_NFP_NET_DEBUG +#ifdef CONFIG_NFP_DEBUG void nfp_net_debugfs_create(void); void nfp_net_debugfs_destroy(void); -void nfp_net_debugfs_adapter_add(struct nfp_net *nn); -void nfp_net_debugfs_adapter_del(struct nfp_net *nn); +struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev); +void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id); +void nfp_net_debugfs_dir_clean(struct dentry **dir); #else static inline void nfp_net_debugfs_create(void) { @@ -809,14 +806,20 @@ static inline void nfp_net_debugfs_destroy(void) { } -static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) +{ + return NULL; +} + +static inline void +nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id) { } -static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +static inline void nfp_net_debugfs_dir_clean(struct dentry **dir) { } -#endif /* CONFIG_NFP_NET_DEBUG */ +#endif /* CONFIG_NFP_DEBUG */ void nfp_net_filter_stats_timer(unsigned long data); int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e8d448109e03..9179a99563af 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -42,6 +42,7 @@ */ #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -280,72 +281,76 @@ static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) } /** - * nfp_net_msix_alloc() - Try to allocate MSI-X irqs - * @nn: NFP Network structure - * @nr_vecs: Number of MSI-X vectors to allocate - * - * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors. + * nfp_net_irqs_alloc() - allocates MSI-X irqs + * @pdev: PCI device structure + * @irq_entries: Array to be initialized and used to hold the irq entries + * @min_irqs: Minimal acceptable number of interrupts + * @wanted_irqs: Target number of interrupts to allocate * - * Return: Number of MSI-X vectors obtained or 0 on error. + * Return: Number of irqs obtained or 0 on error. */ -static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs) +unsigned int +nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, + unsigned int min_irqs, unsigned int wanted_irqs) { - struct pci_dev *pdev = nn->pdev; - int nvecs; - int i; + unsigned int i; + int got_irqs; - for (i = 0; i < nr_vecs; i++) - nn->irq_entries[i].entry = i; + for (i = 0; i < wanted_irqs; i++) + irq_entries[i].entry = i; - nvecs = pci_enable_msix_range(pdev, nn->irq_entries, - NFP_NET_NON_Q_VECTORS + 1, nr_vecs); - if (nvecs < 0) { - nn_warn(nn, "Failed to enable MSI-X. 
Wanted %d-%d (err=%d)\n", - NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs); + got_irqs = pci_enable_msix_range(pdev, irq_entries, + min_irqs, wanted_irqs); + if (got_irqs < 0) { + dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n", + min_irqs, wanted_irqs, got_irqs); return 0; } - return nvecs; + if (got_irqs < wanted_irqs) + dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n", + wanted_irqs, got_irqs); + + return got_irqs; } /** - * nfp_net_irqs_alloc() - allocates MSI-X irqs - * @nn: NFP Network structure + * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev + * @nn: NFP Network structure + * @irq_entries: Table of allocated interrupts + * @n: Size of @irq_entries (number of entries to grab) * - * Return: Number of irqs obtained or 0 on error. + * After interrupts are allocated with nfp_net_irqs_alloc() this function + * should be called to assign them to a specific netdev (port). */ -int nfp_net_irqs_alloc(struct nfp_net *nn) +void +nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, + unsigned int n) { - int wanted_irqs; - unsigned int n; - - wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS; - - n = nfp_net_msix_alloc(nn, wanted_irqs); - if (n == 0) { - nn_err(nn, "Failed to allocate MSI-X IRQs\n"); - return 0; - } - nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; nn->num_r_vecs = nn->max_r_vecs; - if (n < wanted_irqs) - nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n", - wanted_irqs, n); + memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); - return n; + if (nn->num_rx_rings > nn->num_r_vecs || + nn->num_tx_rings > nn->num_r_vecs) + nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n", + nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); + + nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); + nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); + nn->num_stack_tx_rings = nn->num_tx_rings; } /** * nfp_net_irqs_disable() - Disable interrupts - * @nn: NFP Network structure + * @pdev: PCI device structure * * Undoes what @nfp_net_irqs_alloc() does. */ -void nfp_net_irqs_disable(struct nfp_net *nn) +void nfp_net_irqs_disable(struct pci_dev *pdev) { - pci_disable_msix(nn->pdev); + pci_disable_msix(pdev); } /** @@ -409,10 +414,13 @@ out: static irqreturn_t nfp_net_irq_lsc(int irq, void *data) { struct nfp_net *nn = data; + struct msix_entry *entry; + + entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX]; nfp_net_read_link_status(nn); - nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX); + nfp_net_irq_unmask(nn, entry->entry); return IRQ_HANDLED; } @@ -475,32 +483,28 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, } /** - * nfp_net_irqs_assign() - Assign IRQs and setup rvecs. + * nfp_net_vecs_init() - Assign IRQs and setup rvecs. 
* @netdev: netdev structure */ -static void nfp_net_irqs_assign(struct net_device *netdev) +static void nfp_net_vecs_init(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); struct nfp_net_r_vector *r_vec; int r; - if (nn->num_rx_rings > nn->num_r_vecs || - nn->num_tx_rings > nn->num_r_vecs) - nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n", - nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); - - nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); - nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); - nn->num_stack_tx_rings = nn->num_tx_rings; - nn->lsc_handler = nfp_net_irq_lsc; nn->exn_handler = nfp_net_irq_exn; for (r = 0; r < nn->max_r_vecs; r++) { + struct msix_entry *entry; + + entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r]; + r_vec = &nn->r_vecs[r]; r_vec->nfp_net = nn; r_vec->handler = nfp_net_irq_rxtx; - r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; + r_vec->irq_entry = entry->entry; + r_vec->irq_vector = entry->vector; cpumask_set_cpu(r, &r_vec->affinity_mask); } @@ -533,7 +537,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, entry->vector, err); return err; } - nn_writeb(nn, ctrl_offset, vector_idx); + nn_writeb(nn, ctrl_offset, entry->entry); return 0; } @@ -1459,7 +1463,7 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, dev_kfree_skb_any(skb); } -static void +static bool nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, struct nfp_net_tx_ring *tx_ring, struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, @@ -1473,13 +1477,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, if (unlikely(nfp_net_tx_full(tx_ring, 1))) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); if (unlikely(!new_frag)) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); @@ -1494,7 +1498,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, txbuf->real_len = pkt_len; dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, - pkt_len, DMA_TO_DEVICE); + pkt_len, DMA_BIDIRECTIONAL); /* Build TX descriptor */ txd = &tx_ring->txds[wr_idx]; @@ -1509,6 +1513,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, tx_ring->wr_p++; tx_ring->wr_ptr_add++; + return true; } static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) @@ -1606,19 +1611,22 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) dma_sync_single_for_cpu(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, - pkt_len, DMA_FROM_DEVICE); + pkt_len, DMA_BIDIRECTIONAL); act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, pkt_len); switch (act) { case XDP_PASS: break; case XDP_TX: - nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf, - pkt_off, pkt_len); + if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, + tx_ring, rxbuf, + pkt_off, pkt_len))) + trace_xdp_exception(nn->netdev, xdp_prog, act); continue; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(nn->netdev, xdp_prog, act); case XDP_DROP: nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); @@ -1701,7 +1709,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) if (pkts_polled < budget) { napi_complete_done(napi, pkts_polled); - nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx); + nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); 
} return pkts_polled; @@ -1983,7 +1991,6 @@ static int nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, int idx) { - struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; int err; /* Setup NAPI */ @@ -1992,17 +1999,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, snprintf(r_vec->name, sizeof(r_vec->name), "%s-rxtx-%d", nn->netdev->name, idx); - err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec); + err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, + r_vec); if (err) { netif_napi_del(&r_vec->napi); - nn_err(nn, "Error requesting IRQ %d\n", entry->vector); + nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector); return err; } - disable_irq(entry->vector); + disable_irq(r_vec->irq_vector); - irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); + irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask); - nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry); + nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector, + r_vec->irq_entry); return 0; } @@ -2010,11 +2019,9 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, static void nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) { - struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; - - irq_set_affinity_hint(entry->vector, NULL); + irq_set_affinity_hint(r_vec->irq_vector, NULL); netif_napi_del(&r_vec->napi); - free_irq(entry->vector, r_vec); + free_irq(r_vec->irq_vector, r_vec); } /** @@ -2143,7 +2150,7 @@ nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn, /* Write the DMA address, size and MSI-X info to the device */ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma); nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry); } static void @@ -2152,7 +2159,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn, { nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma); nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry); } static int __nfp_net_set_config_and_enable(struct nfp_net *nn) @@ -2191,7 +2198,8 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nfp_net_write_mac_addr(nn); nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); - nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, + nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); /* Enable device */ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; @@ -2246,7 +2254,7 @@ static void nfp_net_open_stack(struct nfp_net *nn) for (r = 0; r < nn->num_r_vecs; r++) { napi_enable(&nn->r_vecs[r].napi); - enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); + enable_irq(nn->r_vecs[r].irq_vector); } netif_tx_wake_all_queues(nn->netdev); @@ -2370,7 +2378,7 @@ static void nfp_net_close_stack(struct nfp_net *nn) nn->link_up = false; for (r = 0; r < nn->num_r_vecs; r++) { - disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); + disable_irq(nn->r_vecs[r].irq_vector); napi_disable(&nn->r_vecs[r].napi); } @@ -2638,8 +2646,8 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL); } -static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void 
nfp_net_stat64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct nfp_net *nn = netdev_priv(netdev); int r; @@ -2669,8 +2677,6 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } - - return stats; } static bool nfp_net_ebpf_capable(struct nfp_net *nn) @@ -3256,7 +3262,7 @@ int nfp_net_netdev_init(struct net_device *netdev) netif_carrier_off(netdev); nfp_net_set_ethtool_ops(netdev); - nfp_net_irqs_assign(netdev); + nfp_net_vecs_init(netdev); return register_netdev(netdev); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index c66f3f954aa8..6e9372a18375 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -202,16 +202,17 @@ static const struct file_operations nfp_xdp_q_fops = { .llseek = seq_lseek }; -void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id) { struct dentry *queues, *tx, *rx, *xdp; - char int_name[16]; + char name[20]; int i; if (IS_ERR_OR_NULL(nfp_dir)) return; - nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir); + sprintf(name, "port%d", id); + nn->debugfs_dir = debugfs_create_dir(name, ddir); if (IS_ERR_OR_NULL(nn->debugfs_dir)) return; @@ -227,24 +228,38 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn) return; for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) { - sprintf(int_name, "%d", i); - debugfs_create_file(int_name, S_IRUSR, rx, + sprintf(name, "%d", i); + debugfs_create_file(name, S_IRUSR, rx, &nn->r_vecs[i], &nfp_rx_q_fops); - debugfs_create_file(int_name, S_IRUSR, xdp, + debugfs_create_file(name, S_IRUSR, xdp, &nn->r_vecs[i], &nfp_xdp_q_fops); } for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) { - sprintf(int_name, "%d", i); - debugfs_create_file(int_name, S_IRUSR, tx, + sprintf(name, "%d", i); + debugfs_create_file(name, S_IRUSR, tx, &nn->r_vecs[i], &nfp_tx_q_fops); } } -void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) { - debugfs_remove_recursive(nn->debugfs_dir); - nn->debugfs_dir = NULL; + struct dentry *dev_dir; + + if (IS_ERR_OR_NULL(nfp_dir)) + return NULL; + + dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir); + if (IS_ERR_OR_NULL(dev_dir)) + return NULL; + + return dev_dir; +} + +void nfp_net_debugfs_dir_clean(struct dentry **dir) +{ + debugfs_remove_recursive(*dir); + *dir = NULL; } void nfp_net_debugfs_create(void) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1b26e9646574..2649f7523c81 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -47,9 +47,14 @@ #include <linux/pci.h> #include <linux/ethtool.h> +#include "nfpcore/nfp.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" +enum nfp_dump_diag { + NFP_DUMP_NSP_DIAG = 0, +}; + /* Support for stats. Returns netdev, driver, and device stats */ enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS }; struct _nfp_net_et_stats { @@ -127,19 +132,39 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = { #define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) +static void nfp_net_get_nspinfo(struct nfp_net *nn, char *version) +{ + struct nfp_nsp *nsp; + + if (!nn->cpp) + return; + + nsp = nfp_nsp_open(nn->cpp); + if (IS_ERR(nsp)) + return; + + snprintf(version, ETHTOOL_FWVERS_LEN, "sp:%hu.%hu", + nfp_nsp_get_abi_ver_major(nsp), + nfp_nsp_get_abi_ver_minor(nsp)); + + nfp_nsp_close(nsp); +} + static void nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { + char nsp_version[ETHTOOL_FWVERS_LEN] = {}; struct nfp_net *nn = netdev_priv(netdev); - strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, nfp_net_driver_version, - sizeof(drvinfo->version)); + strlcpy(drvinfo->driver, nn->pdev->driver->name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version)); + nfp_net_get_nspinfo(nn, nsp_version); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d.%d", + "%d.%d.%d.%d %s", nn->fw_ver.resv, nn->fw_ver.class, - nn->fw_ver.major, nn->fw_ver.minor); + nn->fw_ver.major, nn->fw_ver.minor, nsp_version); strlcpy(drvinfo->bus_info, pci_name(nn->pdev), sizeof(drvinfo->bus_info)); @@ -558,6 +583,75 @@ static int nfp_net_get_coalesce(struct net_device *netdev, return 0; } +/* Other debug dumps + */ +static int +nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer) +{ + struct nfp_resource *res; + int ret; + + if (!nn->cpp) + return -EOPNOTSUPP; + + dump->version = 1; + dump->flag = NFP_DUMP_NSP_DIAG; + + res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (buffer) { + if (dump->len != nfp_resource_size(res)) { + ret = -EINVAL; + goto exit_release; + } + + ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res), + nfp_resource_address(res), + buffer, dump->len); + if (ret != dump->len) + ret = ret < 0 ? 
ret : -EIO; + else + ret = 0; + } else { + dump->len = nfp_resource_size(res); + ret = 0; + } +exit_release: + nfp_resource_release(res); + + return ret; +} + +static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val) +{ + struct nfp_net *nn = netdev_priv(netdev); + + if (!nn->cpp) + return -EOPNOTSUPP; + + if (val->flag != NFP_DUMP_NSP_DIAG) + return -EINVAL; + + nn->ethtool_dump_flag = val->flag; + + return 0; +} + +static int +nfp_net_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + return nfp_dump_nsp_diag(netdev_priv(netdev), dump, NULL); +} + +static int +nfp_net_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + return nfp_dump_nsp_diag(netdev_priv(netdev), dump, buffer); +} + static int nfp_net_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -722,6 +816,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_rxfh = nfp_net_set_rxfh, .get_regs_len = nfp_net_get_regs_len, .get_regs = nfp_net_get_regs, + .set_dump = nfp_net_set_dump, + .get_dump_flag = nfp_net_get_dump_flag, + .get_dump_data = nfp_net_get_dump_data, .get_coalesce = nfp_net_get_coalesce, .set_coalesce = nfp_net_set_coalesce, .get_channels = nfp_net_get_channels, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c new file mode 100644 index 000000000000..3afcdc11480c --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -0,0 +1,586 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_net_main.c + * Netronome network device driver: Main entry point + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Alejandro Lucero <alejandro.lucero@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/msi.h> +#include <linux/random.h> + +#include "nfpcore/nfp.h" +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp_nsp_eth.h" +#include "nfpcore/nfp6000_pcie.h" + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" +#include "nfp_main.h" + +#define NFP_PF_CSR_SLICE_SIZE (32 * 1024) + +static int nfp_is_ready(struct nfp_cpp *cpp) +{ + const char *cp; + long state; + int err; + + cp = nfp_hwinfo_lookup(cpp, "board.state"); + if (!cp) + return 0; + + err = kstrtol(cp, 0, &state); + if (err < 0) + return 0; + + return state == 15; +} + +/** + * nfp_net_map_area() - Help function to map an area + * @cpp: NFP CPP handler + * @name: Name for the area + * @target: CPP target + * @addr: CPP address + * @size: Size of the area + * @area: Area handle (returned). + * + * This function is primarily to simplify the code in the main probe + * function. To undo the effect of this functions call + * @nfp_cpp_area_release_free(*area); + * + * Return: Pointer to memory mapped area or ERR_PTR + */ +static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp, + const char *name, int isl, int target, + unsigned long long addr, unsigned long size, + struct nfp_cpp_area **area) +{ + u8 __iomem *res; + u32 dest; + int err; + + dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl); + + *area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size); + if (!*area) { + err = -EIO; + goto err_area; + } + + err = nfp_cpp_area_acquire(*area); + if (err < 0) + goto err_acquire; + + res = nfp_cpp_area_iomem(*area); + if (!res) { + err = -EIO; + goto err_map; + } + + return res; + +err_map: + nfp_cpp_area_release(*area); +err_acquire: + nfp_cpp_area_free(*area); +err_area: + return (u8 __iomem *)ERR_PTR(err); +} + +static void +nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp, + unsigned int id) +{ + u8 mac_addr[ETH_ALEN]; + const char *mac_str; + char name[32]; + + snprintf(name, sizeof(name), "eth%d.mac", id); + + mac_str = nfp_hwinfo_lookup(cpp, name); + if (!mac_str) { + dev_warn(&nn->pdev->dev, + "Can't lookup MAC address. Generate\n"); + eth_hw_addr_random(nn->netdev); + return; + } + + if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + &mac_addr[0], &mac_addr[1], &mac_addr[2], + &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { + dev_warn(&nn->pdev->dev, + "Can't parse MAC address (%s). Generate.\n", mac_str); + eth_hw_addr_random(nn->netdev); + return; + } + + ether_addr_copy(nn->netdev->dev_addr, mac_addr); + ether_addr_copy(nn->netdev->perm_addr, mac_addr); +} + +/** + * nfp_net_get_mac_addr() - Get the MAC address. + * @nn: NFP Network structure + * @pf: NFP PF device structure + * @id: NFP port id + * + * First try to get the MAC address from NSP ETH table. If that + * fails try HWInfo. As a last resort generate a random address. 
+ */ +static void +nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id) +{ + int i; + + for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++) + if (pf->eth_tbl->ports[i].eth_index == id) { + const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr; + + ether_addr_copy(nn->netdev->dev_addr, mac_addr); + ether_addr_copy(nn->netdev->perm_addr, mac_addr); + return; + } + + nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id); +} + +static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf) +{ + char name[256]; + u16 interface; + int pcie_pf; + int err = 0; + u64 val; + + interface = nfp_cpp_interface(pf->cpp); + pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface); + + snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf); + + val = nfp_rtsym_read_le(pf->cpp, name, &err); + /* Default to one port */ + if (err) { + if (err != -ENOENT) + nfp_err(pf->cpp, "Unable to read adapter port count\n"); + val = 1; + } + + return val; +} + +static unsigned int +nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar, + unsigned int stride, u32 start_off, u32 num_off) +{ + unsigned int i, min_qc, max_qc; + + min_qc = readl(ctrl_bar + start_off); + max_qc = min_qc; + + for (i = 0; i < pf->num_ports; i++) { + /* To make our lives simpler only accept configuration where + * queues are allocated to PFs in order (queues of PFn all have + * indexes lower than PFn+1). + */ + if (max_qc > readl(ctrl_bar + start_off)) + return 0; + + max_qc = readl(ctrl_bar + start_off); + max_qc += readl(ctrl_bar + num_off) * stride; + ctrl_bar += NFP_PF_CSR_SLICE_SIZE; + } + + return max_qc - min_qc; +} + +static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf) +{ + const struct nfp_rtsym *ctrl_sym; + u8 __iomem *ctrl_bar; + char pf_symbol[256]; + u16 interface; + int pcie_pf; + + interface = nfp_cpp_interface(pf->cpp); + pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface); + + snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf); + + ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol); + if (!ctrl_sym) { + dev_err(&pf->pdev->dev, + "Failed to find PF BAR0 symbol %s\n", pf_symbol); + return NULL; + } + + if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) { + dev_err(&pf->pdev->dev, + "PF BAR0 too small to contain %d ports\n", + pf->num_ports); + return NULL; + } + + ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl", + ctrl_sym->domain, ctrl_sym->target, + ctrl_sym->addr, ctrl_sym->size, + &pf->ctrl_area); + if (IS_ERR(ctrl_bar)) { + dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n", + PTR_ERR(ctrl_bar)); + return NULL; + } + + return ctrl_bar; +} + +static void nfp_net_pf_free_netdevs(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + while (!list_empty(&pf->ports)) { + nn = list_first_entry(&pf->ports, struct nfp_net, port_list); + list_del(&nn->port_list); + + nfp_net_netdev_free(nn); + } +} + +static struct nfp_net * +nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar, + void __iomem *tx_bar, void __iomem *rx_bar, + int stride, struct nfp_net_fw_version *fw_ver) +{ + u32 n_tx_rings, n_rx_rings; + struct nfp_net *nn; + + n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS); + n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); + + /* Allocate and initialise the netdev */ + nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings); + if (IS_ERR(nn)) + return nn; + + nn->cpp = pf->cpp; + nn->fw_ver = *fw_ver; + nn->ctrl_bar = ctrl_bar; + nn->tx_bar = tx_bar; + nn->rx_bar = rx_bar; + nn->is_vf = 0; + nn->stride_rx = stride; + nn->stride_tx = stride; + + 
return nn; +} + +static int +nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn, + unsigned int id) +{ + int err; + + /* Get MAC address */ + nfp_net_get_mac_addr(nn, pf, id); + + /* Get ME clock frequency from ctrl BAR + * XXX for now frequency is hardcoded until we figure out how + * to get the value from nfp-hwinfo into ctrl bar + */ + nn->me_freq_mhz = 1200; + + err = nfp_net_netdev_init(nn->netdev); + if (err) + return err; + + nfp_net_debugfs_port_add(nn, pf->ddir, id); + + nfp_net_info(nn); + + return 0; +} + +static int +nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar, + void __iomem *tx_bar, void __iomem *rx_bar, + int stride, struct nfp_net_fw_version *fw_ver) +{ + u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base; + struct nfp_net *nn; + unsigned int i; + int err; + + prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + + for (i = 0; i < pf->num_ports; i++) { + tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ; + rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ; + prev_tx_base = tgt_tx_base; + prev_rx_base = tgt_rx_base; + + nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar, + stride, fw_ver); + if (IS_ERR(nn)) { + err = PTR_ERR(nn); + goto err_free_prev; + } + list_add_tail(&nn->port_list, &pf->ports); + + ctrl_bar += NFP_PF_CSR_SLICE_SIZE; + } + + return 0; + +err_free_prev: + nfp_net_pf_free_netdevs(pf); + return err; +} + +static int +nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, + void __iomem *ctrl_bar, void __iomem *tx_bar, + void __iomem *rx_bar, int stride, + struct nfp_net_fw_version *fw_ver) +{ + unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left; + struct nfp_net *nn; + int err; + + /* Allocate the netdevs and do basic init */ + err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar, + stride, fw_ver); + if (err) + return err; + + /* Get MSI-X vectors */ + wanted_irqs = 0; + list_for_each_entry(nn, &pf->ports, port_list) + wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs; + pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries), + GFP_KERNEL); + if (!pf->irq_entries) { + err = -ENOMEM; + goto err_nn_free; + } + + num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries, + NFP_NET_MIN_PORT_IRQS * pf->num_ports, + wanted_irqs); + if (!num_irqs) { + nn_warn(nn, "Unable to allocate MSI-X Vectors. 
Exiting\n"); + err = -ENOMEM; + goto err_vec_free; + } + + /* Distribute IRQs to ports */ + irqs_left = num_irqs; + ports_left = pf->num_ports; + list_for_each_entry(nn, &pf->ports, port_list) { + unsigned int n; + + n = DIV_ROUND_UP(irqs_left, ports_left); + nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left], + n); + irqs_left -= n; + ports_left--; + } + + /* Finish netdev init and register */ + id = 0; + list_for_each_entry(nn, &pf->ports, port_list) { + err = nfp_net_pf_init_port_netdev(pf, nn, id); + if (err) + goto err_prev_deinit; + + id++; + } + + return 0; + +err_prev_deinit: + list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) { + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + nfp_net_netdev_clean(nn->netdev); + } + nfp_net_irqs_disable(pf->pdev); +err_vec_free: + kfree(pf->irq_entries); +err_nn_free: + nfp_net_pf_free_netdevs(pf); + return err; +} + +/* + * PCI device functions + */ +int nfp_net_pci_probe(struct nfp_pf *pf) +{ + u8 __iomem *ctrl_bar, *tx_bar, *rx_bar; + u32 total_tx_qcs, total_rx_qcs; + struct nfp_net_fw_version fw_ver; + u32 tx_area_sz, rx_area_sz; + u32 start_q; + int stride; + int err; + + /* Verify that the board has completed initialization */ + if (!nfp_is_ready(pf->cpp)) { + nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n"); + return -EINVAL; + } + + pf->num_ports = nfp_net_pf_get_num_ports(pf); + + ctrl_bar = nfp_net_pf_map_ctrl_bar(pf); + if (!ctrl_bar) + return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER; + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { + stride = 2; + nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 
4: + stride = 4; + break; + default: + nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + } + + /* Find how many QC structs need to be mapped */ + total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride, + NFP_NET_CFG_START_TXQ, + NFP_NET_CFG_MAX_TXRINGS); + total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride, + NFP_NET_CFG_START_RXQ, + NFP_NET_CFG_MAX_RXRINGS); + if (!total_tx_qcs || !total_rx_qcs) { + nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n", + total_tx_qcs, total_rx_qcs); + err = -EINVAL; + goto err_ctrl_unmap; + } + + tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs; + rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs; + + /* Map TX queues */ + start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0, + NFP_PCIE_QUEUE(start_q), + tx_area_sz, &pf->tx_area); + if (IS_ERR(tx_bar)) { + nfp_err(pf->cpp, "Failed to map TX area.\n"); + err = PTR_ERR(tx_bar); + goto err_ctrl_unmap; + } + + /* Map RX queues */ + start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0, + NFP_PCIE_QUEUE(start_q), + rx_area_sz, &pf->rx_area); + if (IS_ERR(rx_bar)) { + nfp_err(pf->cpp, "Failed to map RX area.\n"); + err = PTR_ERR(rx_bar); + goto err_unmap_tx; + } + + pf->ddir = nfp_net_debugfs_device_add(pf->pdev); + + err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar, + stride, &fw_ver); + if (err) + goto err_clean_ddir; + + return 0; + +err_clean_ddir: + nfp_net_debugfs_dir_clean(&pf->ddir); + nfp_cpp_area_release_free(pf->rx_area); +err_unmap_tx: + nfp_cpp_area_release_free(pf->tx_area); +err_ctrl_unmap: + nfp_cpp_area_release_free(pf->ctrl_area); + return err; +} + +void nfp_net_pci_remove(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->ports, port_list) { + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + + nfp_net_netdev_clean(nn->netdev); + } + + nfp_net_pf_free_netdevs(pf); + + nfp_net_debugfs_dir_clean(&pf->ddir); + + nfp_net_irqs_disable(pf->pdev); + kfree(pf->irq_entries); + + nfp_cpp_area_release_free(pf->rx_area); + nfp_cpp_area_release_free(pf->tx_area); + nfp_cpp_area_release_free(pf->ctrl_area); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index d065235034d4..39407f7cc586 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -45,9 +45,27 @@ #include "nfp_net_ctrl.h" #include "nfp_net.h" +#include "nfp_main.h" + +/** + * struct nfp_net_vf - NFP VF-specific device structure + * @nn: NFP Net structure for this device + * @irq_entries: Pre-allocated array of MSI-X entries + * @q_bar: Pointer to mapped QC memory (NULL if TX/RX mapped directly) + * @ddir: Per-device debugfs directory + */ +struct nfp_net_vf { + struct nfp_net *nn; + + struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS + + NFP_NET_MAX_TX_RINGS]; + u8 __iomem *q_bar; + + struct dentry *ddir; +}; + +static const char nfp_net_driver_name[] = "nfp_netvf"; -const char nfp_net_driver_name[] = "nfp_netvf"; -const char nfp_net_driver_version[] = "0.1"; #define PCI_DEVICE_NFP6000VF 0x6003 static const struct pci_device_id nfp_netvf_pci_device_ids[] = { { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF, @@ -82,15 +100,22 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, u32 tx_bar_off, rx_bar_off; u32 tx_bar_sz, rx_bar_sz; int tx_bar_no, rx_bar_no; + struct nfp_net_vf *vf; + unsigned int num_irqs; u8 __iomem *ctrl_bar; struct nfp_net *nn; u32 startq; int stride; int err; + vf = kzalloc(sizeof(*vf), GFP_KERNEL); + if (!vf) + return -ENOMEM; + pci_set_drvdata(pdev, vf); + err = pci_enable_device_mem(pdev); if (err) - return err; + goto err_free_vf; err = pci_request_regions(pdev, nfp_net_driver_name); if (err) { @@ -182,6 +207,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, err = PTR_ERR(nn); goto err_ctrl_unmap; } + vf->nn = nn; nn->fw_ver = fw_ver; nn->ctrl_bar = ctrl_bar; @@ -205,17 +231,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; - nn->q_bar = ioremap_nocache(map_addr, bar_sz); - if (!nn->q_bar) { + vf->q_bar = ioremap_nocache(map_addr, bar_sz); + if (!vf->q_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; goto err_netdev_free; } /* TX queues */ - nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off); + nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off); /* RX queues */ - nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off); + nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off); } else { resource_size_t map_addr; @@ -240,12 +266,15 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, nfp_netvf_get_mac_addr(nn); - err = nfp_net_irqs_alloc(nn); - if (!err) { + num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries, + NFP_NET_MIN_PORT_IRQS, + NFP_NET_NON_Q_VECTORS + nn->num_r_vecs); + if (!num_irqs) { nn_warn(nn, "Unable to allocate MSI-X Vectors. 
Exiting\n"); err = -EIO; goto err_unmap_rx; } + nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs); /* Get ME clock frequency from ctrl BAR * XXX for now frequency is hardcoded until we figure out how @@ -257,25 +286,23 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, if (err) goto err_irqs_disable; - pci_set_drvdata(pdev, nn); - nfp_net_info(nn); - nfp_net_debugfs_adapter_add(nn); + vf->ddir = nfp_net_debugfs_device_add(pdev); + nfp_net_debugfs_port_add(nn, vf->ddir, 0); return 0; err_irqs_disable: - nfp_net_irqs_disable(nn); + nfp_net_irqs_disable(pdev); err_unmap_rx: - if (!nn->q_bar) + if (!vf->q_bar) iounmap(nn->rx_bar); err_unmap_tx: - if (!nn->q_bar) + if (!vf->q_bar) iounmap(nn->tx_bar); else - iounmap(nn->q_bar); + iounmap(vf->q_bar); err_netdev_free: - pci_set_drvdata(pdev, NULL); nfp_net_netdev_free(nn); err_ctrl_unmap: iounmap(ctrl_bar); @@ -283,71 +310,47 @@ err_pci_regions: pci_release_regions(pdev); err_pci_disable: pci_disable_device(pdev); +err_free_vf: + pci_set_drvdata(pdev, NULL); + kfree(vf); return err; } static void nfp_netvf_pci_remove(struct pci_dev *pdev) { - struct nfp_net *nn = pci_get_drvdata(pdev); + struct nfp_net_vf *vf = pci_get_drvdata(pdev); + struct nfp_net *nn = vf->nn; /* Note, the order is slightly different from above as we need * to keep the nn pointer around till we have freed everything. */ - nfp_net_debugfs_adapter_del(nn); + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + nfp_net_debugfs_dir_clean(&vf->ddir); nfp_net_netdev_clean(nn->netdev); - nfp_net_irqs_disable(nn); + nfp_net_irqs_disable(pdev); - if (!nn->q_bar) { + if (!vf->q_bar) { iounmap(nn->rx_bar); iounmap(nn->tx_bar); } else { - iounmap(nn->q_bar); + iounmap(vf->q_bar); } iounmap(nn->ctrl_bar); - pci_set_drvdata(pdev, NULL); - nfp_net_netdev_free(nn); pci_release_regions(pdev); pci_disable_device(pdev); + + pci_set_drvdata(pdev, NULL); + kfree(vf); } -static struct pci_driver nfp_netvf_pci_driver = { +struct pci_driver nfp_netvf_pci_driver = { .name = nfp_net_driver_name, .id_table = nfp_netvf_pci_device_ids, .probe = nfp_netvf_pci_probe, .remove = nfp_netvf_pci_remove, }; - -static int __init nfp_netvf_init(void) -{ - int err; - - pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n", - nfp_net_driver_name); - - nfp_net_debugfs_create(); - err = pci_register_driver(&nfp_netvf_pci_driver); - if (err) { - nfp_net_debugfs_destroy(); - return err; - } - - return 0; -} - -static void __exit nfp_netvf_exit(void) -{ - pci_unregister_driver(&nfp_netvf_pci_driver); - nfp_net_debugfs_destroy(); -} - -module_init(nfp_netvf_init); -module_exit(nfp_netvf_exit); - -MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("NFP VF network device driver"); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h new file mode 100644 index 000000000000..6cee6382deb4 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. 
+ * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NFP_CRC32_H +#define NFP_CRC32_H + +#include <linux/kernel.h> +#include <linux/crc32.h> + +/** + * crc32_posix_end() - Finalize POSIX CRC32 working state + * @crc: Current CRC32 working state + * @total_len: Total length of data that was CRC32'd + * + * Return: Final POSIX CRC32 value + */ +static inline u32 crc32_posix_end(u32 crc, size_t total_len) +{ + /* Extend with the length of the string. */ + while (total_len != 0) { + u8 c = total_len & 0xff; + + crc = crc32_be(crc, &c, 1); + total_len >>= 8; + } + + return ~crc; +} + +static inline u32 crc32_posix(const void *buff, size_t len) +{ + return crc32_posix_end(crc32_be(0, buff, len), len); +} + +#endif /* NFP_CRC32_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h new file mode 100644 index 000000000000..42cb720b696d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp.h + * Interface for NFP device access and query functions. 
+ */ + +#ifndef __NFP_H__ +#define __NFP_H__ + +#include <linux/device.h> +#include <linux/types.h> + +#include "nfp_cpp.h" + +/* Implemented in nfp_hwinfo.c */ + +const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup); + +/* Implemented in nfp_nsp.c */ + +struct nfp_nsp; +struct firmware; + +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); +void nfp_nsp_close(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); +int nfp_nsp_wait(struct nfp_nsp *state); +int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_write_eth_table(struct nfp_nsp *state, + const void *buf, unsigned int size); + +/* Implemented in nfp_resource.c */ + +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL + +/* NFP Resource Table self-identifier */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + +/* All other keys are CRC32-POSIX of the 8-byte identification string */ + +/* ARM/PCI vNIC Interfaces 0..3 */ +#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0" +#define NFP_RESOURCE_VNIC_PCI_1 "vnic.p1" +#define NFP_RESOURCE_VNIC_PCI_2 "vnic.p2" +#define NFP_RESOURCE_VNIC_PCI_3 "vnic.p3" + +/* NFP Hardware Info Database */ +#define NFP_RESOURCE_NFP_HWINFO "nfp.info" + +/* Service Processor */ +#define NFP_RESOURCE_NSP "nfp.sp" +#define NFP_RESOURCE_NSP_DIAG "arm.diag" + +/* Netronone Flow Firmware Table */ +#define NFP_RESOURCE_NFP_NFFW "nfp.nffw" + +/* MAC Statistics Accumulator */ +#define NFP_RESOURCE_MAC_STATISTICS "mac.stat" + +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); + +void nfp_resource_release(struct nfp_resource *res); + +u32 nfp_resource_cpp_id(struct nfp_resource *res); + +const char *nfp_resource_name(struct nfp_resource *res); + +u64 nfp_resource_address(struct nfp_resource *res); + +u64 nfp_resource_size(struct nfp_resource *res); + +#endif /* !__NFP_H__ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h new file mode 100644 index 000000000000..0e497a6154db --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NFP6000_NFP6000_H +#define NFP6000_NFP6000_H + +#include <linux/errno.h> +#include <linux/types.h> + +/* CPP Target IDs */ +#define NFP_CPP_TARGET_INVALID 0 +#define NFP_CPP_TARGET_NBI 1 +#define NFP_CPP_TARGET_QDR 2 +#define NFP_CPP_TARGET_ILA 6 +#define NFP_CPP_TARGET_MU 7 +#define NFP_CPP_TARGET_PCIE 9 +#define NFP_CPP_TARGET_ARM 10 +#define NFP_CPP_TARGET_CRYPTO 12 +#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ +#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ +#define NFP_CPP_TARGET_CT_XPB 14 +#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 +#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH + +#define NFP_ISL_EMEM0 24 + +#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL +#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL + +#define PUSHPULL(_pull, _push) ((_pull) << 4 | (_push) << 0) +#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0) +#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4) + +static inline int pushpull_width(int pp) +{ + pp &= 0xf; + + if (pp == 0) + return -EINVAL; + return 2 << pp; +} + +static inline int nfp_cppat_mu_locality_lsb(int mode, bool addr40) +{ + switch (mode) { + case 0 ... 3: + return addr40 ? 38 : 30; + default: + return -EINVAL; + } +} + +int nfp_target_pushpull(u32 cpp_id, u64 address); +int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, + u32 *cpp_target_id, u64 *cpp_target_address, + const u32 *imb_table); + +#endif /* NFP6000_NFP6000_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h new file mode 100644 index 000000000000..40fb19939505 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_xpb.h + * Author: Jason McMullan <jason.mcmullan@netronome.com> + */ + +#ifndef NFP6000_XPB_H +#define NFP6000_XPB_H + +/* For use with NFP6000 Databook "XPB Addressing" section + */ +#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24) + +#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000) + +#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F) + +/* For use with NFP6000 Databook "XPB Island and Device IDs" chapter + */ +#define NFP_XPB_DEVICE(island, slave, device) \ + (NFP_XPB_OVERLAY(island) | \ + (((slave) & 3) << 22) | \ + (((device) & 0x3f) << 16)) + +#endif /* NFP6000_XPB_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c new file mode 100644 index 000000000000..15cc3e77cf6a --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -0,0 +1,1364 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp6000_pcie.c + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * + * Multiplexes the NFP BARs between NFP internal resources and + * implements the PCIe specific interface for generic CPP bus access. + * + * The BARs are managed with refcounts and are allocated/acquired + * using target, token and offset/size matching. The generic CPP bus + * abstraction builds upon this BAR interface. 
+ */ + +#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/kref.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/sort.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include "nfp_cpp.h" + +#include "nfp6000/nfp6000.h" + +#include "nfp6000_pcie.h" + +#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) +#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \ + (0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x) (((_x) & 0x3) << 30) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x) (((_x) >> 30) & 0x3) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x) (((_x) & 0x3) << 28) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x) (((_x) >> 28) & 0x3) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x) (((_x) & 0xffffff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x) (((_x) >> 0) & 0xffffff) +#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \ + (0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x) (((_x) & 0x7f) << 24) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x) (((_x) >> 24) & 0x7f) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x) (((_x) & 0x3ff) << 14) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x) (((_x) >> 14) & 0x3ff) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x) (((_x) & 0x3fff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x) (((_x) >> 0) & 0x3fff) +#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \ + (0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x) (((_x) & 0xf) << 28) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x) (((_x) >> 28) & 0xf) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x) (((_x) & 0x1f) << 23) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x) (((_x) >> 23) & 0x1f) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x) (((_x) & 0x1f) << 18) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x) (((_x) >> 18) & 0x1f) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x) (((_x) & 0xff) << 10) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x) (((_x) >> 10) & 0xff) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x) (((_x) & 0x3ff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff) + +#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x) (((_x) & 0x1f) << 16) +#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f) +#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x) (((_x) & 0xffff) << 0) +#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x) (((_x) >> 0) & 0xffff) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x) (((_x) & 0x3) << 27) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x) (((_x) >> 27) & 0x3) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT 0 +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT 1 +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE 3 +#define NFP_PCIE_BAR_PCIE2CPP_MapType(_x) (((_x) & 0x7) << 29) +#define NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x) (((_x) >> 29) & 0x7) +#define NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED 0 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_BULK 1 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET 2 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL 3 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0 4 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1 5 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2 6 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3 7 +#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x) (((_x) & 0xf) << 
23) +#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf) +#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x) (((_x) & 0x3) << 21) +#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3) +#define NFP_PCIE_EM 0x020000 +#define NFP_PCIE_SRAM 0x000000 + +#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2)) +#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) +#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \ + (0x400 + ((bar) * 8 + (slot)) * 4) + +#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \ + (((bar) * 8 + (slot)) * 4) + +/* The number of explicit BARs to reserve. + * Minimum is 0, maximum is 4 on the NFP6000. + */ +#define NFP_PCIE_EXPLICIT_BARS 2 + +struct nfp6000_pcie; +struct nfp6000_area_priv; + +/** + * struct nfp_bar - describes BAR configuration and usage + * @nfp: backlink to owner + * @barcfg: cached contents of BAR config CSR + * @base: the BAR's base CPP offset + * @mask: mask for the BAR aperture (read only) + * @bitsize: bitsize of BAR aperture (read only) + * @index: index of the BAR + * @refcnt: number of current users + * @iomem: mapped IO memory + * @resource: iomem resource window + */ +struct nfp_bar { + struct nfp6000_pcie *nfp; + u32 barcfg; + u64 base; /* CPP address base */ + u64 mask; /* Bit mask of the bar */ + u32 bitsize; /* Bit size of the bar */ + int index; + atomic_t refcnt; + + void __iomem *iomem; + struct resource *resource; +}; + +#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8) + +struct nfp6000_pcie { + struct pci_dev *pdev; + struct device *dev; + + /* PCI BAR management */ + spinlock_t bar_lock; /* Protect the PCI2CPP BAR cache */ + int bars; + struct nfp_bar bar[NFP_PCI_BAR_MAX]; + wait_queue_head_t bar_waiters; + + /* Reserved BAR access */ + struct { + void __iomem *csr; + void __iomem *em; + void __iomem *expl[4]; + } iomem; + + /* Explicit IO access */ + struct { + struct mutex mutex; /* Lock access to this explicit group */ + u8 master_id; + u8 signal_ref; + void __iomem *data; + struct { + void __iomem *addr; + int bitsize; + int free[4]; + } group[4]; + } expl; +}; + +static u32 nfp_bar_maptype(struct nfp_bar *bar) +{ + return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg); +} + +static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar) +{ + return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8; +} + +static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar) +{ + return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2) + + nfp_bar_resource_len(bar) * (bar->index & 7); +} + +#define TARGET_WIDTH_32 4 +#define TARGET_WIDTH_64 8 + +static int +compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar, + u32 *bar_config, u64 *bar_base, + int tgt, int act, int tok, u64 offset, size_t size, int width) +{ + int bitsize; + u32 newcfg; + + if (tgt >= NFP_CPP_NUM_TARGETS) + return -EINVAL; + + switch (width) { + case 8: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT); + break; + case 4: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); + break; + case 0: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE); + break; + default: + return -EINVAL; + } + + if (act != NFP_CPP_ACTION_RW && act 
!= 0) { + /* Fixed CPP mapping with specific action */ + u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1); + + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + offset &= mask; + + bitsize = 40 - 16; + } else { + u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1); + + /* Bulk mapping */ + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_BULK); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + + offset &= mask; + + bitsize = 40 - 21; + } + + if (bar->bitsize < bitsize) + return -EINVAL; + + newcfg |= offset >> bitsize; + + if (bar_base) + *bar_base = offset; + + if (bar_config) + *bar_config = newcfg; + + return 0; +} + +static int +nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg) +{ + int base, slot; + int xbar; + + base = bar->index >> 3; + slot = bar->index & 7; + + if (nfp->iomem.csr) { + xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot); + writel(newcfg, nfp->iomem.csr + xbar); + /* Readback to ensure BAR is flushed */ + readl(nfp->iomem.csr + xbar); + } else { + xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot); + pci_write_config_dword(nfp->pdev, xbar, newcfg); + } + + bar->barcfg = newcfg; + + return 0; +} + +static int +reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar, + int tgt, int act, int tok, u64 offset, size_t size, int width) +{ + u64 newbase; + u32 newcfg; + int err; + + err = compute_bar(nfp, bar, &newcfg, &newbase, + tgt, act, tok, offset, size, width); + if (err) + return err; + + bar->base = newbase; + + return nfp6000_bar_write(nfp, bar, newcfg); +} + +/* Check if BAR can be used with the given parameters. 
*/ +static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok, + u64 offset, size_t size, int width) +{ + int bartgt, baract, bartok; + int barwidth; + u32 maptype; + + maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg); + bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg); + bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg); + baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg); + + barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg); + switch (barwidth) { + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT: + barwidth = 4; + break; + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT: + barwidth = 8; + break; + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE: + barwidth = 0; + break; + default: + barwidth = -1; + break; + } + + switch (maptype) { + case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET: + bartok = -1; + /* FALLTHROUGH */ + case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK: + baract = NFP_CPP_ACTION_RW; + if (act == 0) + act = NFP_CPP_ACTION_RW; + /* FALLTHROUGH */ + case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED: + break; + default: + /* We don't match explicit bars through the area interface */ + return 0; + } + + /* Make sure to match up the width */ + if (barwidth != width) + return 0; + + if ((bartgt < 0 || bartgt == tgt) && + (bartok < 0 || bartok == tok) && + (baract == act) && + bar->base <= offset && + (bar->base + (1 << bar->bitsize)) >= (offset + size)) + return 1; + + /* No match */ + return 0; +} + +static int +find_matching_bar(struct nfp6000_pcie *nfp, + u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width) +{ + int n; + + for (n = 0; n < nfp->bars; n++) { + struct nfp_bar *bar = &nfp->bar[n]; + + if (matching_bar(bar, tgt, act, tok, offset, size, width)) + return n; + } + + return -1; +} + +/* Return EAGAIN if no resource is available */ +static int +find_unused_bar_noblock(struct nfp6000_pcie *nfp, + int tgt, int act, int tok, + u64 offset, size_t size, int width) +{ + int n, invalid = 0; + + for (n = 0; n < nfp->bars; n++) { + struct nfp_bar *bar = &nfp->bar[n]; + int err; + + if (bar->bitsize == 0) { + invalid++; + continue; + } + + if (atomic_read(&bar->refcnt) != 0) + continue; + + /* Just check to see if we can make it fit... */ + err = compute_bar(nfp, bar, NULL, NULL, + tgt, act, tok, offset, size, width); + + if (err < 0) + invalid++; + else + return n; + } + + return (n == invalid) ? 
-EINVAL : -EAGAIN; +} + +static int +find_unused_bar_and_lock(struct nfp6000_pcie *nfp, + int tgt, int act, int tok, + u64 offset, size_t size, int width) +{ + unsigned long flags; + int n; + + spin_lock_irqsave(&nfp->bar_lock, flags); + + n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width); + if (n < 0) + spin_unlock_irqrestore(&nfp->bar_lock, flags); + else + __release(&nfp->bar_lock); + + return n; +} + +static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar) +{ + atomic_inc(&bar->refcnt); +} + +static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar) +{ + if (atomic_dec_and_test(&bar->refcnt)) + wake_up_interruptible(&nfp->bar_waiters); +} + +static int +nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum, + u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width) +{ + return wait_event_interruptible(nfp->bar_waiters, + (*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok, + offset, size, width)) + != -EAGAIN); +} + +static int +nfp_alloc_bar(struct nfp6000_pcie *nfp, + u32 tgt, u32 act, u32 tok, + u64 offset, size_t size, int width, int nonblocking) +{ + unsigned long irqflags; + int barnum, retval; + + if (size > (1 << 24)) + return -EINVAL; + + spin_lock_irqsave(&nfp->bar_lock, irqflags); + barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width); + if (barnum >= 0) { + /* Found a perfect match. */ + nfp_bar_get(nfp, &nfp->bar[barnum]); + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + return barnum; + } + + barnum = find_unused_bar_noblock(nfp, tgt, act, tok, + offset, size, width); + if (barnum < 0) { + if (nonblocking) + goto err_nobar; + + /* Wait until a BAR becomes available. The + * find_unused_bar function will reclaim the bar_lock + * if a free BAR is found. + */ + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok, + offset, size, width); + if (retval) + return retval; + __acquire(&nfp->bar_lock); + } + + nfp_bar_get(nfp, &nfp->bar[barnum]); + retval = reconfigure_bar(nfp, &nfp->bar[barnum], + tgt, act, tok, offset, size, width); + if (retval < 0) { + nfp_bar_put(nfp, &nfp->bar[barnum]); + barnum = retval; + } + +err_nobar: + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + return barnum; +} + +static void disable_bars(struct nfp6000_pcie *nfp); + +static int bar_cmp(const void *aptr, const void *bptr) +{ + const struct nfp_bar *a = aptr, *b = bptr; + + if (a->bitsize == b->bitsize) + return a->index - b->index; + else + return a->bitsize - b->bitsize; +} + +/* Map all PCI bars and fetch the actual BAR configurations from the + * board. We assume that the BAR with the PCIe config block is + * already mapped. 
+ * + * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) + * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA) + * BAR0.2: -- + * BAR0.3: -- + * BAR0.4: Reserved for Explicit 0.0-0.3 access + * BAR0.5: Reserved for Explicit 1.0-1.3 access + * BAR0.6: Reserved for Explicit 2.0-2.3 access + * BAR0.7: Reserved for Explicit 3.0-3.3 access + * + * BAR1.0-BAR1.7: -- + * BAR2.0-BAR2.7: -- + */ +static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) +{ + const u32 barcfg_msix_general = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; + const u32 barcfg_msix_xpb = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | + NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( + NFP_CPP_TARGET_ISLAND_XPB); + const u32 barcfg_explicit[4] = { + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3), + }; + struct nfp_bar *bar; + int i, bars_free; + int expl_groups; + + bar = &nfp->bar[0]; + for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) { + struct resource *res; + + res = &nfp->pdev->resource[(i >> 3) * 2]; + + /* Skip over BARs that are not IORESOURCE_MEM */ + if (!(resource_type(res) & IORESOURCE_MEM)) { + bar--; + continue; + } + + bar->resource = res; + bar->barcfg = 0; + + bar->nfp = nfp; + bar->index = i; + bar->mask = nfp_bar_resource_len(bar) - 1; + bar->bitsize = fls(bar->mask); + bar->base = 0; + bar->iomem = NULL; + } + + nfp->bars = bar - &nfp->bar[0]; + if (nfp->bars < 8) { + dev_err(nfp->dev, "No usable BARs found!\n"); + return -EINVAL; + } + + bars_free = nfp->bars; + + /* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70) + */ + mutex_init(&nfp->expl.mutex); + + nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4) + << 4; + nfp->expl.signal_ref = 0x10; + + /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */ + bar = &nfp->bar[0]; + bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + nfp_bar_resource_len(bar)); + if (bar->iomem) { + dev_info(nfp->dev, + "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n"); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp6000_bar_write(nfp, bar, barcfg_msix_general); + + nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000; + } + + if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || + nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) { + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); + expl_groups = 4; + } else { + int pf = nfp->pdev->devfn & 7; + + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + expl_groups = 1; + } + nfp->iomem.em = bar->iomem + NFP_PCIE_EM; + + /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ + bar = &nfp->bar[1]; + dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n"); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp6000_bar_write(nfp, bar, barcfg_msix_xpb); + + /* Use BAR0.4..BAR0.7 for EXPL IO */ + for (i = 0; i < 4; i++) { + int j; + + if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) { + nfp->expl.group[i].bitsize = 0; + continue; + } + + bar = &nfp->bar[4 + i]; + bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + nfp_bar_resource_len(bar)); + if (bar->iomem) { + dev_info(nfp->dev, + "BAR0.%d RESERVED: Explicit%d 
Mapping\n", + 4 + i, i); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp->expl.group[i].bitsize = bar->bitsize; + nfp->expl.group[i].addr = bar->iomem; + nfp6000_bar_write(nfp, bar, barcfg_explicit[i]); + + for (j = 0; j < 4; j++) + nfp->expl.group[i].free[j] = true; + } + nfp->iomem.expl[i] = bar->iomem; + } + + /* Sort bars by bit size - use the smallest possible first. */ + sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]), + bar_cmp, NULL); + + dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n", + nfp->bars, bars_free); + + return 0; +} + +static void disable_bars(struct nfp6000_pcie *nfp) +{ + struct nfp_bar *bar = &nfp->bar[0]; + int n; + + for (n = 0; n < nfp->bars; n++, bar++) { + if (bar->iomem) { + iounmap(bar->iomem); + bar->iomem = NULL; + } + } +} + +/* + * Generic CPP bus access interface. + */ + +struct nfp6000_area_priv { + atomic_t refcnt; + + struct nfp_bar *bar; + u32 bar_offset; + + u32 target; + u32 action; + u32 token; + u64 offset; + struct { + int read; + int write; + int bar; + } width; + size_t size; + + void __iomem *iomem; + phys_addr_t phys; + struct resource resource; +}; + +static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest, + unsigned long long address, unsigned long size) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + u32 target = NFP_CPP_ID_TARGET_of(dest); + u32 action = NFP_CPP_ID_ACTION_of(dest); + u32 token = NFP_CPP_ID_TOKEN_of(dest); + int pp; + + pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address); + if (pp < 0) + return pp; + + priv->width.read = PUSH_WIDTH(pp); + priv->width.write = PULL_WIDTH(pp); + if (priv->width.read > 0 && + priv->width.write > 0 && + priv->width.read != priv->width.write) { + return -EINVAL; + } + + if (priv->width.read > 0) + priv->width.bar = priv->width.read; + else + priv->width.bar = priv->width.write; + + atomic_set(&priv->refcnt, 0); + priv->bar = NULL; + + priv->target = target; + priv->action = action; + priv->token = token; + priv->offset = address; + priv->size = size; + memset(&priv->resource, 0, sizeof(priv->resource)); + + return 0; +} + +static void nfp6000_area_cleanup(struct nfp_cpp_area *area) +{ +} + +static void priv_area_get(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + atomic_inc(&priv->refcnt); +} + +static int priv_area_put(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + if (WARN_ON(!atomic_read(&priv->refcnt))) + return 0; + + return atomic_dec_and_test(&priv->refcnt); +} + +static int nfp6000_area_acquire(struct nfp_cpp_area *area) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + int barnum, err; + + if (priv->bar) { + /* Already allocated. */ + priv_area_get(area); + return 0; + } + + barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token, + priv->offset, priv->size, priv->width.bar, 1); + + if (barnum < 0) { + err = barnum; + goto err_alloc_bar; + } + priv->bar = &nfp->bar[barnum]; + + /* Calculate offset into BAR. 
*/ + if (nfp_bar_maptype(priv->bar) == + NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) { + priv->bar_offset = priv->offset & + (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET( + priv->bar, priv->target); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET( + priv->bar, priv->token); + } else { + priv->bar_offset = priv->offset & priv->bar->mask; + } + + /* We don't actually try to acquire the resource area using + * request_resource. This would prevent sharing the mapped + * BAR between multiple CPP areas and prevent us from + * effectively utilizing the limited amount of BAR resources. + */ + priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset; + priv->resource.name = nfp_cpp_area_name(area); + priv->resource.start = priv->phys; + priv->resource.end = priv->resource.start + priv->size - 1; + priv->resource.flags = IORESOURCE_MEM; + + /* If the bar is already mapped in, use its mapping */ + if (priv->bar->iomem) + priv->iomem = priv->bar->iomem + priv->bar_offset; + else + /* Must have been too big. Sub-allocate. */ + priv->iomem = ioremap_nocache(priv->phys, priv->size); + + if (IS_ERR_OR_NULL(priv->iomem)) { + dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n", + (int)priv->size, priv->bar->index); + err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem); + priv->iomem = NULL; + goto err_iomem_remap; + } + + priv_area_get(area); + return 0; + +err_iomem_remap: + nfp_bar_put(nfp, priv->bar); + priv->bar = NULL; +err_alloc_bar: + return err; +} + +static void nfp6000_area_release(struct nfp_cpp_area *area) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + if (!priv_area_put(area)) + return; + + if (!priv->bar->iomem) + iounmap(priv->iomem); + + nfp_bar_put(nfp, priv->bar); + + priv->bar = NULL; + priv->iomem = NULL; +} + +static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->phys; +} + +static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->iomem; +} + +static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area) +{ + /* Use the BAR resource as the resource for the CPP area. + * This enables us to share the BAR among multiple CPP areas + * without resource conflicts. + */ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->bar->resource; +} + +static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + u64 __maybe_unused *wrptr64 = kernel_vaddr; + const u64 __iomem __maybe_unused *rdptr64; + struct nfp6000_area_priv *priv; + u32 *wrptr32 = kernel_vaddr; + const u32 __iomem *rdptr32; + int n, width; + bool is_64; + + priv = nfp_cpp_area_priv(area); + rdptr64 = priv->iomem + offset; + rdptr32 = priv->iomem + offset; + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.read; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? 
Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) + return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area), + NFP_CPP_ID(priv->target, + priv->action, + priv->token), + priv->offset + offset, + kernel_vaddr, length, width); + + is_64 = width == TARGET_WIDTH_64; + + /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW) + is_64 = false; + + if (is_64) { + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; + } else { + if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) + return -EINVAL; + } + + if (WARN_ON(!priv->bar)) + return -EFAULT; + + if (is_64) +#ifndef __raw_readq + return -EINVAL; +#else + for (n = 0; n < length; n += sizeof(u64)) + *wrptr64++ = __raw_readq(rdptr64++); +#endif + else + for (n = 0; n < length; n += sizeof(u32)) + *wrptr32++ = __raw_readl(rdptr32++); + + return n; +} + +static int +nfp6000_area_write(struct nfp_cpp_area *area, + const void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + const u64 __maybe_unused *rdptr64 = kernel_vaddr; + u64 __iomem __maybe_unused *wrptr64; + const u32 *rdptr32 = kernel_vaddr; + struct nfp6000_area_priv *priv; + u32 __iomem *wrptr32; + int n, width; + bool is_64; + + priv = nfp_cpp_area_priv(area); + wrptr64 = priv->iomem + offset; + wrptr32 = priv->iomem + offset; + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.write; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) + return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area), + NFP_CPP_ID(priv->target, + priv->action, + priv->token), + priv->offset + offset, + kernel_vaddr, length, width); + + is_64 = width == TARGET_WIDTH_64; + + /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW) + is_64 = false; + + if (is_64) { + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; + } else { + if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) + return -EINVAL; + } + + if (WARN_ON(!priv->bar)) + return -EFAULT; + + if (is_64) +#ifndef __raw_writeq + return -EINVAL; +#else + for (n = 0; n < length; n += sizeof(u64)) { + __raw_writeq(*rdptr64++, wrptr64++); + wmb(); + } +#endif + else + for (n = 0; n < length; n += sizeof(u32)) { + __raw_writel(*rdptr32++, wrptr32++); + wmb(); + } + + return n; +} + +struct nfp6000_explicit_priv { + struct nfp6000_pcie *nfp; + struct { + int group; + int area; + } bar; + int bitsize; + void __iomem *data; + void __iomem *addr; +}; + +static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl)); + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + int i, j; + + mutex_lock(&nfp->expl.mutex); + for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) { + if (!nfp->expl.group[i].bitsize) + continue; + + for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) { + u16 data_offset; + + if (!nfp->expl.group[i].free[j]) + continue; + + priv->nfp = nfp; + priv->bar.group = i; + priv->bar.area = j; + priv->bitsize = nfp->expl.group[i].bitsize - 2; + + data_offset = (priv->bar.group << 9) + + (priv->bar.area << 7); + priv->data = nfp->expl.data + data_offset; + priv->addr = nfp->expl.group[i].addr + + (priv->bar.area << 
priv->bitsize); + nfp->expl.group[i].free[j] = false; + + mutex_unlock(&nfp->expl.mutex); + return 0; + } + } + mutex_unlock(&nfp->expl.mutex); + + return -EAGAIN; +} + +static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + struct nfp6000_pcie *nfp = priv->nfp; + + mutex_lock(&nfp->expl.mutex); + nfp->expl.group[priv->bar.group].free[priv->bar.area] = true; + mutex_unlock(&nfp->expl.mutex); +} + +static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + const u32 *src = buff; + size_t i; + + for (i = 0; i < len; i += sizeof(u32)) + writel(*(src++), priv->data + i); + + return i; +} + +static int +nfp6000_explicit_do(struct nfp_cpp_explicit *expl, + const struct nfp_cpp_explicit_command *cmd, u64 address) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + u8 signal_master, signal_ref, data_master; + struct nfp6000_pcie *nfp = priv->nfp; + int sigmask = 0; + u16 data_ref; + u32 csr[3]; + + if (cmd->siga_mode) + sigmask |= 1 << cmd->siga; + if (cmd->sigb_mode) + sigmask |= 1 << cmd->sigb; + + signal_master = cmd->signal_master; + if (!signal_master) + signal_master = nfp->expl.master_id; + + signal_ref = cmd->signal_ref; + if (signal_master == nfp->expl.master_id) + signal_ref = nfp->expl.signal_ref + + ((priv->bar.group * 4 + priv->bar.area) << 1); + + data_master = cmd->data_master; + if (!data_master) + data_master = nfp->expl.master_id; + + data_ref = cmd->data_ref; + if (data_master == nfp->expl.master_id) + data_ref = 0x1000 + + (priv->bar.group << 9) + (priv->bar.area << 7); + + csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) | + NFP_PCIE_BAR_EXPLICIT_BAR0_Token( + NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16); + + csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) | + NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) | + NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref); + + csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target( + NFP_CPP_ID_TARGET_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR2_Action( + NFP_CPP_ID_ACTION_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) | + NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) | + NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master); + + if (nfp->iomem.csr) { + writel(csr[0], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group, + priv->bar.area)); + writel(csr[1], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group, + priv->bar.area)); + writel(csr[2], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group, + priv->bar.area)); + /* Readback to ensure BAR is flushed */ + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group, + priv->bar.area)); + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group, + priv->bar.area)); + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group, + priv->bar.area)); + } else { + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR0( + priv->bar.group, priv->bar.area), + csr[0]); + + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR1( + priv->bar.group, priv->bar.area), + csr[1]); + + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR2( + priv->bar.group, priv->bar.area), + csr[2]); + } + + /* Issue the 'kickoff' transaction */ + readb(priv->addr + (address & ((1 << priv->bitsize) - 1))); + + return sigmask; +} 
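/* [Editorial sketch, not part of the patch] The explicit handlers above are
 * reached through the nfp_cpp_explicit_*() wrappers declared in nfp_cpp.h
 * later in this patch. A minimal caller for a single 32-bit push read might
 * look roughly like the function below; the wrapper names and signatures are
 * taken from this patch, but the function itself and the length/byte-mask/
 * signal values are illustrative assumptions, not the driver's actual read
 * path.
 */
static int sketch_explicit_read32(struct nfp_cpp *cpp, u32 cpp_id,
				  u64 addr, u32 *val)
{
	struct nfp_cpp_explicit *expl;
	int err;

	/* Reserve one of the group/area windows set up in enable_bars();
	 * this ends up in nfp6000_explicit_acquire() above.
	 */
	expl = nfp_cpp_explicit_acquire(cpp);
	if (!expl)
		return -EBUSY;

	/* Describe the transaction: one word, all bytes enabled, and a push
	 * completion expected on signal A (values assumed for the example).
	 */
	nfp_cpp_explicit_set_target(expl, cpp_id, 1, 0xf);
	nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH,
				    0, NFP_SIGNAL_NONE);

	/* Program the EXPLICIT_BAR CSRs and issue the kickoff read
	 * (nfp6000_explicit_do() above).
	 */
	err = nfp_cpp_explicit_do(expl, addr);
	if (err >= 0)
		/* Copy the pushed data back out of the PCIe SRAM data area
		 * (nfp6000_explicit_get() below).
		 */
		err = nfp_cpp_explicit_get(expl, val, sizeof(*val));

	nfp_cpp_explicit_release(expl);

	return err < 0 ? err : 0;
}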
+ +static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl, + void *buff, size_t len) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + u32 *dst = buff; + size_t i; + + for (i = 0; i < len; i += sizeof(u32)) + *(dst++) = readl(priv->data + i); + + return i; +} + +static int nfp6000_init(struct nfp_cpp *cpp) +{ + nfp_cpp_area_cache_add(cpp, SZ_64K); + nfp_cpp_area_cache_add(cpp, SZ_64K); + nfp_cpp_area_cache_add(cpp, SZ_256K); + + return 0; +} + +static void nfp6000_free(struct nfp_cpp *cpp) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp); + + disable_bars(nfp); + kfree(nfp); +} + +static void nfp6000_read_serial(struct device *dev, u8 *serial) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int pos; + u32 reg; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (!pos) { + memset(serial, 0, NFP_SERIAL_LEN); + return; + } + + pci_read_config_dword(pdev, pos + 4, &reg); + put_unaligned_be16(reg >> 16, serial + 4); + pci_read_config_dword(pdev, pos + 8, &reg); + put_unaligned_be32(reg, serial); +} + +static u16 nfp6000_get_interface(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int pos; + u32 reg; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff); + + pci_read_config_dword(pdev, pos + 4, &reg); + + return reg & 0xffff; +} + +static const struct nfp_cpp_operations nfp6000_pcie_ops = { + .owner = THIS_MODULE, + + .init = nfp6000_init, + .free = nfp6000_free, + + .read_serial = nfp6000_read_serial, + .get_interface = nfp6000_get_interface, + + .area_priv_size = sizeof(struct nfp6000_area_priv), + .area_init = nfp6000_area_init, + .area_cleanup = nfp6000_area_cleanup, + .area_acquire = nfp6000_area_acquire, + .area_release = nfp6000_area_release, + .area_phys = nfp6000_area_phys, + .area_iomem = nfp6000_area_iomem, + .area_resource = nfp6000_area_resource, + .area_read = nfp6000_area_read, + .area_write = nfp6000_area_write, + + .explicit_priv_size = sizeof(struct nfp6000_explicit_priv), + .explicit_acquire = nfp6000_explicit_acquire, + .explicit_release = nfp6000_explicit_release, + .explicit_put = nfp6000_explicit_put, + .explicit_do = nfp6000_explicit_do, + .explicit_get = nfp6000_explicit_get, +}; + +/** + * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device + * @pdev: NFP6000 PCI device + * + * Return: NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev) +{ + struct nfp6000_pcie *nfp; + u16 interface; + int err; + + /* Finished with card initialization. 
*/ + dev_info(&pdev->dev, + "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n"); + + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); + if (!nfp) { + err = -ENOMEM; + goto err_ret; + } + + nfp->dev = &pdev->dev; + nfp->pdev = pdev; + init_waitqueue_head(&nfp->bar_waiters); + spin_lock_init(&nfp->bar_lock); + + interface = nfp6000_get_interface(&pdev->dev); + + if (NFP_CPP_INTERFACE_TYPE_of(interface) != + NFP_CPP_INTERFACE_TYPE_PCI) { + dev_err(&pdev->dev, + "Interface type %d is not the expected %d\n", + NFP_CPP_INTERFACE_TYPE_of(interface), + NFP_CPP_INTERFACE_TYPE_PCI); + err = -ENODEV; + goto err_free_nfp; + } + + if (NFP_CPP_INTERFACE_CHANNEL_of(interface) != + NFP_CPP_INTERFACE_CHANNEL_PEROPENER) { + dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n", + NFP_CPP_INTERFACE_CHANNEL_of(interface), + NFP_CPP_INTERFACE_CHANNEL_PEROPENER); + err = -ENODEV; + goto err_free_nfp; + } + + err = enable_bars(nfp, interface); + if (err) + goto err_free_nfp; + + /* Probe for all the common NFP devices */ + return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp); + +err_free_nfp: + kfree(nfp); +err_ret: + dev_err(&pdev->dev, "NFP6000 PCI setup failed\n"); + return ERR_PTR(err); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h new file mode 100644 index 000000000000..245d8aaaa97d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp6000_pcie.h + * Author: Jason McMullan <jason.mcmullan@netronome.com> + */ + +#ifndef NFP6000_PCIE_H +#define NFP6000_PCIE_H + +#include "nfp_cpp.h" + +struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev); + +#endif /* NFP6000_PCIE_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h new file mode 100644 index 000000000000..31fe92247f51 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h @@ -0,0 +1,246 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. 
+ * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_arm.h + * Definitions for ARM-based registers and memory spaces + */ + +#ifndef NFP_ARM_H +#define NFP_ARM_H + +#define NFP_ARM_QUEUE(_q) (0x100000 + (0x800 * ((_q) & 0xff))) +#define NFP_ARM_IM 0x200000 +#define NFP_ARM_EM 0x300000 +#define NFP_ARM_GCSR 0x400000 +#define NFP_ARM_MPCORE 0x800000 +#define NFP_ARM_PL310 0xa00000 +/* Register Type: BulkBARConfig */ +#define NFP_ARM_GCSR_BULK_BAR(_bar) (0x0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_BULK_BAR_TYPE (0x1 << 31) +#define NFP_ARM_GCSR_BULK_BAR_TYPE_BULK (0x0) +#define NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA (0x80000000) +#define NFP_ARM_GCSR_BULK_BAR_TGT(_x) (((_x) & 0xf) << 27) +#define NFP_ARM_GCSR_BULK_BAR_TGT_of(_x) (((_x) >> 27) & 0xf) +#define NFP_ARM_GCSR_BULK_BAR_TOK(_x) (((_x) & 0x3) << 25) +#define NFP_ARM_GCSR_BULK_BAR_TOK_of(_x) (((_x) >> 25) & 0x3) +#define NFP_ARM_GCSR_BULK_BAR_LEN (0x1 << 24) +#define NFP_ARM_GCSR_BULK_BAR_LEN_32BIT (0x0) +#define NFP_ARM_GCSR_BULK_BAR_LEN_64BIT (0x1000000) +#define NFP_ARM_GCSR_BULK_BAR_ADDR(_x) ((_x) & 0x7ff) +#define NFP_ARM_GCSR_BULK_BAR_ADDR_of(_x) ((_x) & 0x7ff) +/* Register Type: ExpansionBARConfig */ +#define NFP_ARM_GCSR_EXPA_BAR(_bar) (0x20 + (0x4 * ((_bar) & 0xf))) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE (0x1 << 31) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA (0x0) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL (0x80000000) +#define NFP_ARM_GCSR_EXPA_BAR_TGT(_x) (((_x) & 0xf) << 27) +#define NFP_ARM_GCSR_EXPA_BAR_TGT_of(_x) (((_x) >> 27) & 0xf) +#define NFP_ARM_GCSR_EXPA_BAR_TOK(_x) (((_x) & 0x3) << 25) +#define NFP_ARM_GCSR_EXPA_BAR_TOK_of(_x) (((_x) >> 25) & 0x3) +#define NFP_ARM_GCSR_EXPA_BAR_LEN (0x1 << 24) +#define NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT (0x0) +#define NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT (0x1000000) +#define NFP_ARM_GCSR_EXPA_BAR_ACT(_x) (((_x) & 0x1f) << 19) +#define NFP_ARM_GCSR_EXPA_BAR_ACT_of(_x) (((_x) >> 19) & 0x1f) +#define NFP_ARM_GCSR_EXPA_BAR_ACT_DERIVED (0) +#define NFP_ARM_GCSR_EXPA_BAR_ADDR(_x) ((_x) & 0x7fff) +#define NFP_ARM_GCSR_EXPA_BAR_ADDR_of(_x) ((_x) & 0x7fff) +/* Register Type: ExplicitBARConfig0_Reg */ +#define NFP_ARM_GCSR_EXPL0_BAR(_bar) (0x60 + (0x4 * ((_bar) & 0x7))) +#define 
NFP_ARM_GCSR_EXPL0_BAR_ADDR(_x) ((_x) & 0x3ffff) +#define NFP_ARM_GCSR_EXPL0_BAR_ADDR_of(_x) ((_x) & 0x3ffff) +/* Register Type: ExplicitBARConfig1_Reg */ +#define NFP_ARM_GCSR_EXPL1_BAR(_bar) (0x80 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL1_BAR_POSTED (0x1 << 31) +#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(_x) (((_x) & 0x7f) << 24) +#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF_of(_x) (((_x) >> 24) & 0x7f) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(_x) (((_x) & 0xff) << 16) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER_of(_x) (((_x) >> 16) & 0xff) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(_x) ((_x) & 0x3fff) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF_of(_x) ((_x) & 0x3fff) +/* Register Type: ExplicitBARConfig2_Reg */ +#define NFP_ARM_GCSR_EXPL2_BAR(_bar) (0xa0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL2_BAR_TGT(_x) (((_x) & 0xf) << 28) +#define NFP_ARM_GCSR_EXPL2_BAR_TGT_of(_x) (((_x) >> 28) & 0xf) +#define NFP_ARM_GCSR_EXPL2_BAR_ACT(_x) (((_x) & 0x1f) << 23) +#define NFP_ARM_GCSR_EXPL2_BAR_ACT_of(_x) (((_x) >> 23) & 0x1f) +#define NFP_ARM_GCSR_EXPL2_BAR_LEN(_x) (((_x) & 0x1f) << 18) +#define NFP_ARM_GCSR_EXPL2_BAR_LEN_of(_x) (((_x) >> 18) & 0x1f) +#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(_x) (((_x) & 0xff) << 10) +#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK_of(_x) (((_x) >> 10) & 0xff) +#define NFP_ARM_GCSR_EXPL2_BAR_TOK(_x) (((_x) & 0x3) << 8) +#define NFP_ARM_GCSR_EXPL2_BAR_TOK_of(_x) (((_x) >> 8) & 0x3) +#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(_x) ((_x) & 0xff) +#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER_of(_x) ((_x) & 0xff) +/* Register Type: PostedCommandSignal */ +#define NFP_ARM_GCSR_EXPL_POST(_bar) (0xc0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B(_x) (((_x) & 0x7f) << 25) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_of(_x) (((_x) >> 25) & 0x7f) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS (0x1 << 24) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL (0x0) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH (0x1000000) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A(_x) (((_x) & 0x7f) << 17) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_of(_x) (((_x) >> 17) & 0x7f) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS (0x1 << 16) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL (0x0) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH (0x10000) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_RCVD (0x1 << 7) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID (0x1 << 6) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_RCVD (0x1 << 5) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID (0x1 << 4) +#define NFP_ARM_GCSR_EXPL_POST_CMD_COMPLETE (0x1) +/* Register Type: MPCoreBaseAddress */ +#define NFP_ARM_GCSR_MPCORE_BASE 0x00e0 +#define NFP_ARM_GCSR_MPCORE_BASE_ADDR(_x) (((_x) & 0x7ffff) << 13) +#define NFP_ARM_GCSR_MPCORE_BASE_ADDR_of(_x) (((_x) >> 13) & 0x7ffff) +/* Register Type: PL310BaseAddress */ +#define NFP_ARM_GCSR_PL310_BASE 0x00e4 +#define NFP_ARM_GCSR_PL310_BASE_ADDR(_x) (((_x) & 0xfffff) << 12) +#define NFP_ARM_GCSR_PL310_BASE_ADDR_of(_x) (((_x) >> 12) & 0xfffff) +/* Register Type: MPCoreConfig */ +#define NFP_ARM_GCSR_MP0_CFG 0x00e8 +#define NFP_ARM_GCSR_MP0_CFG_SPI_BOOT (0x1 << 14) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN(_x) (((_x) & 0x3) << 12) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_of(_x) (((_x) >> 12) & 0x3) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_LITTLE (0) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_BIG (1) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR (0x1 << 8) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_LO (0x0) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_HI (0x100) +#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN(_x) (((_x) & 0xf) << 4) 
+#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN_of(_x) (((_x) >> 4) & 0xf) +#define NFP_ARM_GCSR_MP0_CFG_ARMID(_x) ((_x) & 0xf) +#define NFP_ARM_GCSR_MP0_CFG_ARMID_of(_x) ((_x) & 0xf) +/* Register Type: MPCoreIDCacheDataError */ +#define NFP_ARM_GCSR_MP0_CACHE_ERR 0x00ec +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D7 (0x1 << 15) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D6 (0x1 << 14) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D5 (0x1 << 13) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D4 (0x1 << 12) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D3 (0x1 << 11) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D2 (0x1 << 10) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D1 (0x1 << 9) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D0 (0x1 << 8) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I7 (0x1 << 7) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I6 (0x1 << 6) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I5 (0x1 << 5) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I4 (0x1 << 4) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I3 (0x1 << 3) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I2 (0x1 << 2) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I1 (0x1 << 1) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I0 (0x1) +/* Register Type: ARMDFT */ +#define NFP_ARM_GCSR_DFT 0x0100 +#define NFP_ARM_GCSR_DFT_DBG_REQ (0x1 << 20) +#define NFP_ARM_GCSR_DFT_DBG_EN (0x1 << 19) +#define NFP_ARM_GCSR_DFT_WFE_EVT_TRG (0x1 << 18) +#define NFP_ARM_GCSR_DFT_ETM_WFI_RDY (0x1 << 17) +#define NFP_ARM_GCSR_DFT_ETM_PWR_ON (0x1 << 16) +#define NFP_ARM_GCSR_DFT_BIST_FAIL_of(_x) (((_x) >> 8) & 0xf) +#define NFP_ARM_GCSR_DFT_BIST_DONE_of(_x) (((_x) >> 4) & 0xf) +#define NFP_ARM_GCSR_DFT_BIST_RUN(_x) ((_x) & 0x7) +#define NFP_ARM_GCSR_DFT_BIST_RUN_of(_x) ((_x) & 0x7) + +/* Gasket CSRs */ +/* NOTE: These cannot be remapped, and are always at this location. + */ +#define NFP_ARM_GCSR_START (0xd6000000 + NFP_ARM_GCSR) +#define NFP_ARM_GCSR_SIZE SZ_64K + +/* BAR CSRs + */ +#define NFP_ARM_GCSR_BULK_BITS 11 +#define NFP_ARM_GCSR_EXPA_BITS 15 +#define NFP_ARM_GCSR_EXPL_BITS 18 + +#define NFP_ARM_GCSR_BULK_SHIFT (40 - 11) +#define NFP_ARM_GCSR_EXPA_SHIFT (40 - 15) +#define NFP_ARM_GCSR_EXPL_SHIFT (40 - 18) + +#define NFP_ARM_GCSR_BULK_SIZE (1 << NFP_ARM_GCSR_BULK_SHIFT) +#define NFP_ARM_GCSR_EXPA_SIZE (1 << NFP_ARM_GCSR_EXPA_SHIFT) +#define NFP_ARM_GCSR_EXPL_SIZE (1 << NFP_ARM_GCSR_EXPL_SHIFT) + +#define NFP_ARM_GCSR_EXPL2_CSR(target, action, length, \ + byte_mask, token, signal_master) \ + (NFP_ARM_GCSR_EXPL2_BAR_TGT(target) | \ + NFP_ARM_GCSR_EXPL2_BAR_ACT(action) | \ + NFP_ARM_GCSR_EXPL2_BAR_LEN(length) | \ + NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(byte_mask) | \ + NFP_ARM_GCSR_EXPL2_BAR_TOK(token) | \ + NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(signal_master)) +#define NFP_ARM_GCSR_EXPL1_CSR(posted, signal_ref, data_master, data_ref) \ + (((posted) ? NFP_ARM_GCSR_EXPL1_BAR_POSTED : 0) | \ + NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(signal_ref) | \ + NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(data_master) | \ + NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(data_ref)) +#define NFP_ARM_GCSR_EXPL0_CSR(address) \ + NFP_ARM_GCSR_EXPL0_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPL_SHIFT) +#define NFP_ARM_GCSR_EXPL_POST_EXPECT_A(sig_ref, is_push, is_required) \ + (NFP_ARM_GCSR_EXPL_POST_SIG_A(sig_ref) | \ + ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH : \ + NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL) | \ + ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID : 0)) +#define NFP_ARM_GCSR_EXPL_POST_EXPECT_B(sig_ref, is_push, is_required) \ + (NFP_ARM_GCSR_EXPL_POST_SIG_B(sig_ref) | \ + ((is_push) ? 
NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH : \ + NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL) | \ + ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID : 0)) + +#define NFP_ARM_GCSR_EXPA_CSR(mode, target, token, is_64, action, address) \ + (((mode) ? NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL : \ + NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA) | \ + NFP_ARM_GCSR_EXPA_BAR_TGT(target) | \ + NFP_ARM_GCSR_EXPA_BAR_TOK(token) | \ + ((is_64) ? NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT : \ + NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT) | \ + NFP_ARM_GCSR_EXPA_BAR_ACT(action) | \ + NFP_ARM_GCSR_EXPA_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPA_SHIFT)) + +#define NFP_ARM_GCSR_BULK_CSR(mode, target, token, is_64, address) \ + (((mode) ? NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA : \ + NFP_ARM_GCSR_BULK_BAR_TYPE_BULK) | \ + NFP_ARM_GCSR_BULK_BAR_TGT(target) | \ + NFP_ARM_GCSR_BULK_BAR_TOK(token) | \ + ((is_64) ? NFP_ARM_GCSR_BULK_BAR_LEN_64BIT : \ + NFP_ARM_GCSR_BULK_BAR_LEN_32BIT) | \ + NFP_ARM_GCSR_BULK_BAR_ADDR((address) >> NFP_ARM_GCSR_BULK_SHIFT)) + + /* MP Core CSRs */ +#define NFP_ARM_MPCORE_SIZE SZ_128K + + /* PL320 CSRs */ +#define NFP_ARM_PCSR_SIZE SZ_64K + +#endif /* NFP_ARM_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h new file mode 100644 index 000000000000..edecc0a27485 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -0,0 +1,433 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_cpp.h + * Interface for low-level NFP CPP access. + * Authors: Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ +#ifndef __NFP_CPP_H__ +#define __NFP_CPP_H__ + +#include <linux/ctype.h> +#include <linux/types.h> + +#ifndef NFP_SUBSYS +#define NFP_SUBSYS "nfp" +#endif + +#define nfp_err(cpp, fmt, args...) \ + dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_warn(cpp, fmt, args...) \ + dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_info(cpp, fmt, args...) \ + dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_dbg(cpp, fmt, args...) 
\ + dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) + +#define PCI_64BIT_BAR_COUNT 3 + +#define NFP_CPP_NUM_TARGETS 16 + +struct device; + +struct nfp_cpp_area; +struct nfp_cpp; +struct resource; + +/* Wildcard indicating a CPP read or write action + * + * The action used will be either read or write depending on whether a + * read or write instruction/call is performed on the NFP_CPP_ID. It + * is recomended that the RW action is used even if all actions to be + * performed on a NFP_CPP_ID are known to be only reads or writes. + * Doing so will in many cases save NFP CPP internal software + * resources. + */ +#define NFP_CPP_ACTION_RW 32 + +#define NFP_CPP_TARGET_ID_MASK 0x1f + +/** + * NFP_CPP_ID() - pack target, token, and action into a CPP ID. + * @target: NFP CPP target id + * @action: NFP CPP action id + * @token: NFP CPP token id + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP + * functions. Some CPP devices may allow wildcard identifiers to be + * specified. + * + * Return: NFP CPP ID + */ +#define NFP_CPP_ID(target, action, token) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8)) + +/** + * NFP_CPP_ISLAND_ID() - pack target, token, action, and island into a CPP ID. + * @target: NFP CPP target id + * @action: NFP CPP action id + * @token: NFP CPP token id + * @island: NFP CPP island id + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP + * functions. Some CPP devices may allow wildcard identifiers to be + * specified. + * + * Return: NFP CPP ID + */ +#define NFP_CPP_ISLAND_ID(target, action, token, island) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8) | (((island) & 0xff) << 0)) + +/** + * NFP_CPP_ID_TARGET_of() - Return the NFP CPP target of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP target + */ +static inline u8 NFP_CPP_ID_TARGET_of(u32 id) +{ + return (id >> 24) & NFP_CPP_TARGET_ID_MASK; +} + +/** + * NFP_CPP_ID_TOKEN_of() - Return the NFP CPP token of a NFP CPP ID + * @id: NFP CPP ID + * Return: NFP CPP token + */ +static inline u8 NFP_CPP_ID_TOKEN_of(u32 id) +{ + return (id >> 16) & 0xff; +} + +/** + * NFP_CPP_ID_ACTION_of() - Return the NFP CPP action of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP action + */ +static inline u8 NFP_CPP_ID_ACTION_of(u32 id) +{ + return (id >> 8) & 0xff; +} + +/** + * NFP_CPP_ID_ISLAND_of() - Return the NFP CPP island of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP island + */ +static inline u8 NFP_CPP_ID_ISLAND_of(u32 id) +{ + return (id >> 0) & 0xff; +} + +/* NFP Interface types - logical interface for this CPP connection + * 4 bits are reserved for interface type. + */ +#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 +#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 +#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 +#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 +#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 + +/** + * NFP_CPP_INTERFACE() - Construct a 16-bit NFP Interface ID + * @type: NFP Interface Type + * @unit: Unit identifier for the interface type + * @channel: Channel identifier for the interface unit + * + * Interface IDs consists of 4 bits of interface type, + * 4 bits of unit identifier, and 8 bits of channel identifier. 
+ * + * The NFP Interface ID is used in the implementation of + * NFP CPP API mutexes, which use the MU Atomic CompareAndWrite + * operation - hence the limit to 16 bits to be able to + * use the NFP Interface ID as a lock owner. + * + * Return: Interface ID + */ +#define NFP_CPP_INTERFACE(type, unit, channel) \ + ((((type) & 0xf) << 12) | \ + (((unit) & 0xf) << 8) | \ + (((channel) & 0xff) << 0)) + +/** + * NFP_CPP_INTERFACE_TYPE_of() - Get the interface type + * @interface: NFP Interface ID + * Return: NFP Interface ID's type + */ +#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf) + +/** + * NFP_CPP_INTERFACE_UNIT_of() - Get the interface unit + * @interface: NFP Interface ID + * Return: NFP Interface ID's unit + */ +#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf) + +/** + * NFP_CPP_INTERFACE_CHANNEL_of() - Get the interface channel + * @interface: NFP Interface ID + * Return: NFP Interface ID's channel + */ +#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff) + +/* Implemented in nfp_cppcore.c */ +void nfp_cpp_free(struct nfp_cpp *cpp); +u32 nfp_cpp_model(struct nfp_cpp *cpp); +u16 nfp_cpp_interface(struct nfp_cpp *cpp); +int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); + +void *nfp_hwinfo_cache(struct nfp_cpp *cpp); +void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val); +void *nfp_rtsym_cache(struct nfp_cpp *cpp); +void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val); + +void nfp_nffw_cache_flush(struct nfp_cpp *cpp); + +struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, + u32 cpp_id, + const char *name, + unsigned long long address, + unsigned long size); +struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, + unsigned long size); +void nfp_cpp_area_free(struct nfp_cpp_area *area); +int nfp_cpp_area_acquire(struct nfp_cpp_area *area); +int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area); +void nfp_cpp_area_release(struct nfp_cpp_area *area); +void nfp_cpp_area_release_free(struct nfp_cpp_area *area); +int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *buffer, size_t length); +int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, + const void *buffer, size_t length); +int nfp_cpp_area_check_range(struct nfp_cpp_area *area, + unsigned long long offset, unsigned long size); +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area); +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area); +struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area); +phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area); +void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area); + +int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + u32 *value); +int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + u32 value); +int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + u64 *value); +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + u64 value); +int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + u32 value, size_t length); + +int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_tgt, u32 *value); +int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_tgt, u32 value); +int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, u32 mask, u32 value); + +/* Implemented in nfp_cpplib.c */ +int nfp_cpp_read(struct nfp_cpp *cpp, u32 
cpp_id, + unsigned long long address, void *kernel_vaddr, size_t length); +int nfp_cpp_write(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, const void *kernel_vaddr, + size_t length); +int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 *value); +int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 value); +int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 *value); +int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 value); + +struct nfp_cpp_mutex; + +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key_id); +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, + u32 key_id); +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + +struct nfp_cpp_explicit; + +struct nfp_cpp_explicit_command { + u32 cpp_id; + u16 data_ref; + u8 data_master; + u8 len; + u8 byte_mask; + u8 signal_master; + u8 signal_ref; + u8 posted; + u8 siga; + u8 sigb; + s8 siga_mode; + s8 sigb_mode; +}; + +#define NFP_SERIAL_LEN 6 + +/** + * struct nfp_cpp_operations - NFP CPP operations structure + * @area_priv_size: Size of the nfp_cpp_area private data + * @owner: Owner module + * @init: Initialize the NFP CPP bus + * @free: Free the bus + * @read_serial: Read serial number to memory provided + * @get_interface: Return CPP interface + * @area_init: Initialize a new NFP CPP area (not serialized) + * @area_cleanup: Clean up a NFP CPP area (not serialized) + * @area_acquire: Acquire the NFP CPP area (serialized) + * @area_release: Release area (serialized) + * @area_resource: Get resource range of area (not serialized) + * @area_phys: Get physical address of area (not serialized) + * @area_iomem: Get iomem of area (not serialized) + * @area_read: Perform a read from a NFP CPP area (serialized) + * @area_write: Perform a write to a NFP CPP area (serialized) + * @explicit_priv_size: Size of an explicit's private area + * @explicit_acquire: Acquire an explicit area + * @explicit_release: Release an explicit area + * @explicit_put: Write data to send + * @explicit_get: Read data received + * @explicit_do: Perform the transaction + */ +struct nfp_cpp_operations { + size_t area_priv_size; + struct module *owner; + + int (*init)(struct nfp_cpp *cpp); + void (*free)(struct nfp_cpp *cpp); + + void (*read_serial)(struct device *dev, u8 *serial); + u16 (*get_interface)(struct device *dev); + + int (*area_init)(struct nfp_cpp_area *area, + u32 dest, unsigned long long address, + unsigned long size); + void (*area_cleanup)(struct nfp_cpp_area *area); + int (*area_acquire)(struct nfp_cpp_area *area); + void (*area_release)(struct nfp_cpp_area *area); + struct resource *(*area_resource)(struct nfp_cpp_area *area); + phys_addr_t (*area_phys)(struct nfp_cpp_area *area); + void __iomem *(*area_iomem)(struct nfp_cpp_area *area); + int (*area_read)(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length); + int (*area_write)(struct nfp_cpp_area *area, const void *kernel_vaddr, + unsigned long offset, unsigned int length); + + size_t explicit_priv_size; + int (*explicit_acquire)(struct nfp_cpp_explicit *expl); + void (*explicit_release)(struct nfp_cpp_explicit *expl); + int (*explicit_put)(struct 
nfp_cpp_explicit *expl, + const void *buff, size_t len); + int (*explicit_get)(struct nfp_cpp_explicit *expl, + void *buff, size_t len); + int (*explicit_do)(struct nfp_cpp_explicit *expl, + const struct nfp_cpp_explicit_command *cmd, + u64 address); +}; + +struct nfp_cpp * +nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, + struct device *parent, void *priv); +void *nfp_cpp_priv(struct nfp_cpp *priv); + +int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size); + +/* The following section contains extensions to the + * NFP CPP API, to be used in a Linux kernel-space context. + */ + +/* Use this channel ID for multiple virtual channel interfaces + * (ie ARM and PCIe) when setting up the interface field. + */ +#define NFP_CPP_INTERFACE_CHANNEL_PEROPENER 255 +struct device *nfp_cpp_device(struct nfp_cpp *cpp); + +/* Return code masks for nfp_cpp_explicit_do() + */ +#define NFP_SIGNAL_MASK_A BIT(0) /* Signal A fired */ +#define NFP_SIGNAL_MASK_B BIT(1) /* Signal B fired */ + +enum nfp_cpp_explicit_signal_mode { + NFP_SIGNAL_NONE = 0, + NFP_SIGNAL_PUSH = 1, + NFP_SIGNAL_PUSH_OPTIONAL = -1, + NFP_SIGNAL_PULL = 2, + NFP_SIGNAL_PULL_OPTIONAL = -2, +}; + +struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp); +int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, u32 cpp_id, + u8 len, u8 mask); +int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl, + u8 data_master, u16 data_ref); +int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl, + u8 signal_master, u8 signal_ref); +int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted, + u8 siga, + enum nfp_cpp_explicit_signal_mode siga_mode, + u8 sigb, + enum nfp_cpp_explicit_signal_mode sigb_mode); +int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len); +int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address); +int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len); +void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl); +struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *expl); +void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit); + +/* Implemented in nfp_cpplib.c */ + +int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model); + +int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, void *buff, size_t len, + int width_read); + +int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, const void *buff, size_t len, + int width_write); + +#endif /* !__NFP_CPP_H__ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c new file mode 100644 index 000000000000..40108e66c654 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -0,0 +1,1746 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_cppcore.c + * Provides low-level access to the NFP's internal CPP bus + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <asm/unaligned.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/wait.h> + +#include "nfp_arm.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define NFP_ARM_GCSR_SOFTMODEL2 0x0000014c +#define NFP_ARM_GCSR_SOFTMODEL3 0x00000150 + +struct nfp_cpp_resource { + struct list_head list; + const char *name; + u32 cpp_id; + u64 start; + u64 end; +}; + +struct nfp_cpp_mutex { + struct list_head list; + struct nfp_cpp *cpp; + int target; + u16 usage; + u16 depth; + unsigned long long address; + u32 key; +}; + +struct nfp_cpp { + struct device dev; + + void *priv; /* Private data of the low-level implementation */ + + u32 model; + u16 interface; + u8 serial[NFP_SERIAL_LEN]; + + const struct nfp_cpp_operations *op; + struct list_head resource_list; /* NFP CPP resource list */ + struct list_head mutex_cache; /* Mutex cache */ + rwlock_t resource_lock; + wait_queue_head_t waitq; + + /* NFP6000 CPP Mapping Table */ + u32 imb_cat_table[16]; + + /* Cached areas for cpp/xpb readl/writel speedups */ + struct mutex area_cache_mutex; /* Lock for the area cache */ + struct list_head area_cache_list; + + /* Cached information */ + void *hwinfo; + void *rtsym; +}; + +/* Element of the area_cache_list */ +struct nfp_cpp_area_cache { + struct list_head entry; + u32 id; + u64 addr; + u32 size; + struct nfp_cpp_area *area; +}; + +struct nfp_cpp_area { + struct nfp_cpp *cpp; + struct kref kref; + atomic_t refcount; + struct mutex mutex; /* Lock for the area's refcount */ + unsigned long long offset; + unsigned long size; + struct nfp_cpp_resource resource; + void __iomem *iomem; + /* Here follows the 'priv' part of nfp_cpp_area. */ +}; + +struct nfp_cpp_explicit { + struct nfp_cpp *cpp; + struct nfp_cpp_explicit_command cmd; + /* Here follows the 'priv' part of nfp_cpp_area. 
*/ +}; + +static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res) +{ + struct nfp_cpp_resource *tmp; + struct list_head *pos; + + list_for_each(pos, head) { + tmp = container_of(pos, struct nfp_cpp_resource, list); + + if (tmp->cpp_id > res->cpp_id) + break; + + if (tmp->cpp_id == res->cpp_id && tmp->start > res->start) + break; + } + + list_add_tail(&res->list, pos); +} + +static void __resource_del(struct nfp_cpp_resource *res) +{ + list_del_init(&res->list); +} + +static void __release_cpp_area(struct kref *kref) +{ + struct nfp_cpp_area *area = + container_of(kref, struct nfp_cpp_area, kref); + struct nfp_cpp *cpp = nfp_cpp_area_cpp(area); + + if (area->cpp->op->area_cleanup) + area->cpp->op->area_cleanup(area); + + write_lock(&cpp->resource_lock); + __resource_del(&area->resource); + write_unlock(&cpp->resource_lock); + kfree(area); +} + +static void nfp_cpp_area_put(struct nfp_cpp_area *area) +{ + kref_put(&area->kref, __release_cpp_area); +} + +static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area) +{ + kref_get(&area->kref); + + return area; +} + +/** + * nfp_cpp_free() - free the CPP handle + * @cpp: CPP handle + */ +void nfp_cpp_free(struct nfp_cpp *cpp) +{ + struct nfp_cpp_area_cache *cache, *ctmp; + struct nfp_cpp_resource *res, *rtmp; + struct nfp_cpp_mutex *mutex, *mtmp; + + /* There should be no mutexes in the cache at this point. */ + WARN_ON(!list_empty(&cpp->mutex_cache)); + /* .. but if there are, unlock them and complain. */ + list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) { + dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n", + mutex->target, (unsigned long long)mutex->address, + mutex->depth, mutex->usage); + + /* Forcing an unlock */ + mutex->depth = 1; + nfp_cpp_mutex_unlock(mutex); + + /* Forcing a free */ + mutex->usage = 1; + nfp_cpp_mutex_free(mutex); + } + + /* Remove all caches */ + list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) { + list_del(&cache->entry); + if (cache->id) + nfp_cpp_area_release(cache->area); + nfp_cpp_area_free(cache->area); + kfree(cache); + } + + /* There should be no dangling areas at this point */ + WARN_ON(!list_empty(&cpp->resource_list)); + + /* .. but if they weren't, try to clean up. */ + list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) { + struct nfp_cpp_area *area = container_of(res, + struct nfp_cpp_area, + resource); + + dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n", + NFP_CPP_ID_TARGET_of(res->cpp_id), + NFP_CPP_ID_ACTION_of(res->cpp_id), + NFP_CPP_ID_TOKEN_of(res->cpp_id), + res->start, res->end, + res->name ? " " : "", + res->name ? 
res->name : ""); + + if (area->cpp->op->area_release) + area->cpp->op->area_release(area); + + __release_cpp_area(&area->kref); + } + + if (cpp->op->free) + cpp->op->free(cpp); + + kfree(cpp->hwinfo); + kfree(cpp->rtsym); + + device_unregister(&cpp->dev); + + kfree(cpp); +} + +/** + * nfp_cpp_model() - Retrieve the Model ID of the NFP + * @cpp: NFP CPP handle + * + * Return: NFP CPP Model ID + */ +u32 nfp_cpp_model(struct nfp_cpp *cpp) +{ + return cpp->model; +} + +/** + * nfp_cpp_interface() - Retrieve the Interface ID of the NFP + * @cpp: NFP CPP handle + * + * Return: NFP CPP Interface ID + */ +u16 nfp_cpp_interface(struct nfp_cpp *cpp) +{ + return cpp->interface; +} + +/** + * nfp_cpp_serial() - Retrieve the Serial ID of the NFP + * @cpp: NFP CPP handle + * @serial: Pointer to NFP serial number + * + * Return: Length of NFP serial number + */ +int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial) +{ + *serial = &cpp->serial[0]; + return sizeof(cpp->serial); +} + +void *nfp_hwinfo_cache(struct nfp_cpp *cpp) +{ + return cpp->hwinfo; +} + +void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val) +{ + cpp->hwinfo = val; +} + +void *nfp_rtsym_cache(struct nfp_cpp *cpp) +{ + return cpp->rtsym; +} + +void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val) +{ + cpp->rtsym = val; +} + +/** + * nfp_nffw_cache_flush() - Flush cached firmware information + * @cpp: NFP CPP handle + * + * Flush cached firmware information. This function should be called + * every time firmware is loaded on unloaded. + */ +void nfp_nffw_cache_flush(struct nfp_cpp *cpp) +{ + kfree(nfp_rtsym_cache(cpp)); + nfp_rtsym_cache_set(cpp, NULL); +} + +/** + * nfp_cpp_area_alloc_with_name() - allocate a new CPP area + * @cpp: CPP device handle + * @dest: NFP CPP ID + * @name: Name of region + * @address: Address of region + * @size: Size of region + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. 
+ * + * Return: NFP CPP area handle, or NULL + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name, + unsigned long long address, unsigned long size) +{ + struct nfp_cpp_area *area; + u64 tmp64 = address; + int err, name_len; + + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table); + if (err < 0) + return NULL; + + address = tmp64; + + if (!name) + name = "(reserved)"; + + name_len = strlen(name) + 1; + area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len, + GFP_KERNEL); + if (!area) + return NULL; + + area->cpp = cpp; + area->resource.name = (void *)area + sizeof(*area) + + cpp->op->area_priv_size; + memcpy((char *)area->resource.name, name, name_len); + + area->resource.cpp_id = dest; + area->resource.start = address; + area->resource.end = area->resource.start + size - 1; + INIT_LIST_HEAD(&area->resource.list); + + atomic_set(&area->refcount, 0); + kref_init(&area->kref); + mutex_init(&area->mutex); + + if (cpp->op->area_init) { + int err; + + err = cpp->op->area_init(area, dest, address, size); + if (err < 0) { + kfree(area); + return NULL; + } + } + + write_lock(&cpp->resource_lock); + __resource_add(&cpp->resource_list, &area->resource); + write_unlock(&cpp->resource_lock); + + area->offset = address; + area->size = size; + + return area; +} + +/** + * nfp_cpp_area_alloc() - allocate a new CPP area + * @cpp: CPP handle + * @dest: CPP id + * @address: Start address on CPP target + * @size: Size of area in bytes + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. + * + * Return: NFP CPP Area handle, or NULL + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest, + unsigned long long address, unsigned long size) +{ + return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size); +} + +/** + * nfp_cpp_area_free() - free up the CPP area + * @area: CPP area handle + * + * Frees up memory resources held by the CPP area. + */ +void nfp_cpp_area_free(struct nfp_cpp_area *area) +{ + nfp_cpp_area_put(area); +} + +/** + * nfp_cpp_area_acquire() - lock down a CPP area for access + * @area: CPP area handle + * + * Locks down the CPP area for a potential long term activity. Area + * must always be locked down before being accessed. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_acquire(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + if (atomic_inc_return(&area->refcount) == 1) { + int (*a_a)(struct nfp_cpp_area *); + + a_a = area->cpp->op->area_acquire; + if (a_a) { + int err; + + wait_event_interruptible(area->cpp->waitq, + (err = a_a(area)) != -EAGAIN); + if (err < 0) { + atomic_dec(&area->refcount); + mutex_unlock(&area->mutex); + return err; + } + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_get(area); + return 0; +} + +/** + * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access + * @area: CPP area handle + * + * Locks down the CPP area for a potential long term activity. Area + * must always be locked down before being accessed. 
+ * + * NOTE: Returns -EAGAIN if no area is available + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + if (atomic_inc_return(&area->refcount) == 1) { + if (area->cpp->op->area_acquire) { + int err; + + err = area->cpp->op->area_acquire(area); + if (err < 0) { + atomic_dec(&area->refcount); + mutex_unlock(&area->mutex); + return err; + } + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_get(area); + return 0; +} + +/** + * nfp_cpp_area_release() - release a locked down CPP area + * @area: CPP area handle + * + * Releases a previously locked down CPP area. + */ +void nfp_cpp_area_release(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + /* Only call the release on refcount == 0 */ + if (atomic_dec_and_test(&area->refcount)) { + if (area->cpp->op->area_release) { + area->cpp->op->area_release(area); + /* Let anyone waiting for a BAR try to get one.. */ + wake_up_interruptible_all(&area->cpp->waitq); + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_put(area); +} + +/** + * nfp_cpp_area_release_free() - release CPP area and free it + * @area: CPP area handle + * + * Releases CPP area and frees up memory resources held by it. + */ +void nfp_cpp_area_release_free(struct nfp_cpp_area *area) +{ + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); +} + +/** + * nfp_cpp_area_read() - read data from CPP area + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to put data into + * @length: number of bytes to read + * + * Read data from indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_read(struct nfp_cpp_area *area, + unsigned long offset, void *kernel_vaddr, + size_t length) +{ + return area->cpp->op->area_read(area, kernel_vaddr, offset, length); +} + +/** + * nfp_cpp_area_write() - write data to CPP area + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to read data from + * @length: number of bytes to write + * + * Write data to indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_write(struct nfp_cpp_area *area, + unsigned long offset, const void *kernel_vaddr, + size_t length) +{ + return area->cpp->op->area_write(area, kernel_vaddr, offset, length); +} + +/** + * nfp_cpp_area_check_range() - check if address range fits in CPP area + * @area: CPP area handle + * @offset: offset into CPP target + * @length: size of address range in bytes + * + * Check if address range fits within CPP area. Return 0 if area + * fits or -EFAULT on error.
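An illustrative sketch of combining the range check above with area I/O (not part of this patch). Note the two address spaces: nfp_cpp_area_check_range() takes an absolute CPP target address, while nfp_cpp_area_read()/_write() take an offset relative to the start of the area; area_base below is the caller-tracked address the area was allocated at.

static int example_checked_read(struct nfp_cpp_area *area,
				unsigned long long area_base, void *buf,
				unsigned long off, unsigned long len)
{
	int err;

	/* Reject requests that fall outside the allocated window */
	err = nfp_cpp_area_check_range(area, area_base + off, len);
	if (err)
		return err;	/* -EFAULT */

	/* The read itself uses the area-relative offset */
	return nfp_cpp_area_read(area, off, buf, len);
}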
+ * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_check_range(struct nfp_cpp_area *area, + unsigned long long offset, unsigned long length) +{ + if (offset < area->offset || + offset + length > area->offset + area->size) + return -EFAULT; + + return 0; +} + +/** + * nfp_cpp_area_name() - return name of a CPP area + * @cpp_area: CPP area handle + * + * Return: Name of the area, or NULL + */ +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->resource.name; +} + +/** + * nfp_cpp_area_priv() - return private struct for CPP area + * @cpp_area: CPP area handle + * + * Return: Private data for the CPP area + */ +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area) +{ + return &cpp_area[1]; +} + +/** + * nfp_cpp_area_cpp() - return CPP handle for CPP area + * @cpp_area: CPP area handle + * + * Return: NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->cpp; +} + +/** + * nfp_cpp_area_resource() - get resource + * @area: CPP area handle + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: struct resource pointer, or NULL + */ +struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area) +{ + struct resource *res = NULL; + + if (area->cpp->op->area_resource) + res = area->cpp->op->area_resource(area); + + return res; +} + +/** + * nfp_cpp_area_phys() - get physical address of CPP area + * @area: CPP area handle + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: phy_addr_t of the area, or NULL + */ +phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area) +{ + phys_addr_t addr = ~0; + + if (area->cpp->op->area_phys) + addr = area->cpp->op->area_phys(area); + + return addr; +} + +/** + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style + * operations. + * + * NOTE: Area must have been locked down with an 'acquire'. 
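For transports that provide a direct mapping, an acquired area can also be used with readl()/writel(); a hypothetical sketch (not part of this patch), assuming the <linux/io.h> accessors and an already-acquired area:

static int example_poke_csr(struct nfp_cpp_area *area, u32 val)
{
	void __iomem *base;

	base = nfp_cpp_area_iomem(area);
	if (!base)
		return -ENODEV;	/* transport has no iomem mapping */

	writel(val, base);		/* offset 0 within the area */
	return readl(base) == val ? 0 : -EIO;
}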
+ * + * Return: __iomem pointer to the area, or NULL + */ +void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area) +{ + void __iomem *iomem = NULL; + + if (area->cpp->op->area_iomem) + iomem = area->cpp->op->area_iomem(area); + + return iomem; +} + +/** + * nfp_cpp_area_readl() - Read a u32 word from an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_readl(struct nfp_cpp_area *area, + unsigned long offset, u32 *value) +{ + u8 tmp[4]; + int err; + + err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = get_unaligned_le32(tmp); + + return err; +} + +/** + * nfp_cpp_area_writel() - Write a u32 word to an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_writel(struct nfp_cpp_area *area, + unsigned long offset, u32 value) +{ + u8 tmp[4]; + + put_unaligned_le32(value, tmp); + + return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); +} + +/** + * nfp_cpp_area_readq() - Read a u64 word from an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_readq(struct nfp_cpp_area *area, + unsigned long offset, u64 *value) +{ + u8 tmp[8]; + int err; + + err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = get_unaligned_le64(tmp); + + return err; +} + +/** + * nfp_cpp_area_writeq() - Write a u64 word to an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, + unsigned long offset, u64 value) +{ + u8 tmp[8]; + + put_unaligned_le64(value, tmp); + + return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); +} + +/** + * nfp_cpp_area_fill() - fill a CPP area with a value + * @area: CPP area + * @offset: offset into CPP area + * @value: value to fill with + * @length: length of area to fill + * + * Fill indicated area with given value. + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_fill(struct nfp_cpp_area *area, + unsigned long offset, u32 value, size_t length) +{ + u8 tmp[4]; + size_t i; + int k; + + put_unaligned_le32(value, tmp); + + if (offset % sizeof(tmp) || length % sizeof(tmp)) + return -EINVAL; + + for (i = 0; i < length; i += sizeof(tmp)) { + k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp)); + if (k < 0) + return k; + } + + return i; +} + +/** + * nfp_cpp_area_cache_add() - Permanently reserve and area for the hot cache + * @cpp: NFP CPP handle + * @size: Size of the area - MUST BE A POWER OF 2. + */ +int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + + /* Allocate an area - we use the MU target's base as a placeholder, + * as all supported chips have a MU. 
+ */ + area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0), + 0, size); + if (!area) + return -ENOMEM; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); + if (!cache) + return -ENOMEM; + + cache->id = 0; + cache->addr = 0; + cache->size = size; + cache->area = area; + mutex_lock(&cpp->area_cache_mutex); + list_add_tail(&cache->entry, &cpp->area_cache_list); + mutex_unlock(&cpp->area_cache_mutex); + + return 0; +} + +static struct nfp_cpp_area_cache * +area_cache_get(struct nfp_cpp *cpp, u32 id, + u64 addr, unsigned long *offset, size_t length) +{ + struct nfp_cpp_area_cache *cache; + int err; + + /* Early exit when length == 0, which prevents + * the need for special case code below when + * checking against available cache size. + */ + if (length == 0) + return NULL; + + if (list_empty(&cpp->area_cache_list) || id == 0) + return NULL; + + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table); + if (err < 0) + return NULL; + + addr += *offset; + + mutex_lock(&cpp->area_cache_mutex); + + /* See if we have a match */ + list_for_each_entry(cache, &cpp->area_cache_list, entry) { + if (id == cache->id && + addr >= cache->addr && + addr + length <= cache->addr + cache->size) + goto exit; + } + + /* No matches - inspect the tail of the LRU */ + cache = list_entry(cpp->area_cache_list.prev, + struct nfp_cpp_area_cache, entry); + + /* Can we fit in the cache entry? */ + if (round_down(addr + length - 1, cache->size) != + round_down(addr, cache->size)) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + + /* If id != 0, we will need to release it */ + if (cache->id) { + nfp_cpp_area_release(cache->area); + cache->id = 0; + cache->addr = 0; + } + + /* Adjust the start address to be cache size aligned */ + cache->id = id; + cache->addr = addr & ~(u64)(cache->size - 1); + + /* Re-init to the new ID and address */ + if (cpp->op->area_init) { + err = cpp->op->area_init(cache->area, + id, cache->addr, cache->size); + if (err < 0) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + } + + /* Attempt to acquire */ + err = nfp_cpp_area_acquire(cache->area); + if (err < 0) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + +exit: + /* Adjust offset */ + *offset = addr - cache->addr; + return cache; +} + +static void +area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache) +{ + if (!cache) + return; + + /* Move to front of LRU */ + list_del(&cache->entry); + list_add(&cache->entry, &cpp->area_cache_list); + + mutex_unlock(&cpp->area_cache_mutex); +} + +/** + * nfp_cpp_read() - read from CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer for result + * @length: number of bytes to read + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + unsigned long offset = 0; + int err; + + cache = area_cache_get(cpp, destination, address, &offset, length); + if (cache) { + area = cache->area; + } else { + area = nfp_cpp_area_alloc(cpp, destination, address, length); + if (!area) + return -ENOMEM; + + err = nfp_cpp_area_acquire(area); + if (err) + goto out; + } + + err = nfp_cpp_area_read(area, offset, kernel_vaddr, length); +out: + if (cache) + area_cache_put(cpp, cache); + else + nfp_cpp_area_release_free(area); + + return err; +} + +/** + * nfp_cpp_write() 
- write to CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer to read from + * @length: number of bytes to write + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, + const void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + unsigned long offset = 0; + int err; + + cache = area_cache_get(cpp, destination, address, &offset, length); + if (cache) { + area = cache->area; + } else { + area = nfp_cpp_area_alloc(cpp, destination, address, length); + if (!area) + return -ENOMEM; + + err = nfp_cpp_area_acquire(area); + if (err) + goto out; + } + + err = nfp_cpp_area_write(area, offset, kernel_vaddr, length); + +out: + if (cache) + area_cache_put(cpp, cache); + else + nfp_cpp_area_release_free(area); + + return err; +} + +/* Return the correct CPP address, and fixup xpb_addr as needed. */ +static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr) +{ + int island; + u32 xpb; + + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + /* Ensure that non-local XPB accesses go + * out through the global XPBM bus. + */ + island = (*xpb_addr >> 24) & 0x3f; + if (!island) + return xpb; + + if (island != 1) { + *xpb_addr |= 1 << 30; + return xpb; + } + + /* Accesses to the ARM Island overlay uses Island 0 / Global Bit */ + *xpb_addr &= ~0x7f000000; + if (*xpb_addr < 0x60000) { + *xpb_addr |= 1 << 30; + } else { + /* And only non-ARM interfaces use the island id = 1 */ + if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) + != NFP_CPP_INTERFACE_TYPE_ARM) + *xpb_addr |= 1 << 24; + } + + return xpb; +} + +/** + * nfp_xpb_readl() - Read a u32 word from a XPB location + * @cpp: CPP device handle + * @xpb_addr: Address for operation + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value) +{ + u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value); +} + +/** + * nfp_xpb_writel() - Write a u32 word to a XPB location + * @cpp: CPP device handle + * @xpb_addr: Address for operation + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value) +{ + u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value); +} + +/** + * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus + * @cpp: NFP CPP device handle + * @xpb_tgt: XPB target and address + * @mask: mask of bits to alter + * @value: value to modify + * + * KERNEL: This operation is safe to call in interrupt or softirq context. 
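A hypothetical caller of the read-modify-write helper documented above (illustrative only, not part of this patch; the XPB address and field mask are made-up placeholders):

static int example_update_field(struct nfp_cpp *cpp)
{
	const u32 xpb_tgt = 0x000a0000;		/* placeholder XPB CSR */
	const u32 field_mask = 0x000000f0;	/* placeholder field */

	/* Only the masked bits are modified; the rest of the register
	 * is preserved by the readl/writel sequence in the helper.
	 */
	return nfp_xpb_writelm(cpp, xpb_tgt, field_mask, 0x00000030);
}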
+ * + * Return: length of the io, or -ERRNO + */ +int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, + u32 mask, u32 value) +{ + int err; + u32 tmp; + + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + return err; + + tmp &= ~mask; + tmp |= mask & value; + return nfp_xpb_writel(cpp, xpb_tgt, tmp); +} + +/* Lockdep markers */ +static struct lock_class_key nfp_cpp_resource_lock_key; + +static void nfp_cpp_dev_release(struct device *dev) +{ + /* Nothing to do here - it just makes the kernel happy */ +} + +/** + * nfp_cpp_from_operations() - Create a NFP CPP handle + * from an operations structure + * @ops: NFP CPP operations structure + * @parent: Parent device + * @priv: Private data of low-level implementation + * + * NOTE: On failure, cpp_ops->free will be called! + * + * Return: NFP CPP handle on success, ERR_PTR on failure + */ +struct nfp_cpp * +nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, + struct device *parent, void *priv) +{ + const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0); + struct nfp_cpp *cpp; + u32 mask[2]; + u32 xpbaddr; + size_t tgt; + int err; + + cpp = kzalloc(sizeof(*cpp), GFP_KERNEL); + if (!cpp) { + err = -ENOMEM; + goto err_malloc; + } + + cpp->op = ops; + cpp->priv = priv; + cpp->interface = ops->get_interface(parent); + if (ops->read_serial) + ops->read_serial(parent, cpp->serial); + rwlock_init(&cpp->resource_lock); + init_waitqueue_head(&cpp->waitq); + lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); + INIT_LIST_HEAD(&cpp->mutex_cache); + INIT_LIST_HEAD(&cpp->resource_list); + INIT_LIST_HEAD(&cpp->area_cache_list); + mutex_init(&cpp->area_cache_mutex); + cpp->dev.init_name = "cpp"; + cpp->dev.parent = parent; + cpp->dev.release = nfp_cpp_dev_release; + err = device_register(&cpp->dev); + if (err < 0) { + put_device(&cpp->dev); + goto err_dev; + } + + dev_set_drvdata(&cpp->dev, cpp); + + /* NOTE: cpp_lock is NOT locked for op->init, + * since it may call NFP CPP API operations + */ + if (cpp->op->init) { + err = cpp->op->init(cpp); + if (err < 0) { + dev_err(parent, + "NFP interface initialization failed\n"); + goto err_out; + } + } + + err = nfp_cpp_model_autodetect(cpp, &cpp->model); + if (err < 0) { + dev_err(parent, "NFP model detection failed\n"); + goto err_out; + } + + for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) { + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + (tgt * 4); + err = nfp_xpb_readl(cpp, xpbaddr, + &cpp->imb_cat_table[tgt]); + if (err < 0) { + dev_err(parent, + "Can't read CPP mapping from device\n"); + goto err_out; + } + } + + nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2, + &mask[0]); + nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3, + &mask[1]); + + dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n", + nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp)); + + return cpp; + +err_out: + device_unregister(&cpp->dev); +err_dev: + kfree(cpp); +err_malloc: + return ERR_PTR(err); +} + +/** + * nfp_cpp_priv() - Get the operations private data of a CPP handle + * @cpp: CPP handle + * + * Return: Private data for the NFP CPP handle + */ +void *nfp_cpp_priv(struct nfp_cpp *cpp) +{ + return cpp->priv; +} + +/** + * nfp_cpp_device() - Get the Linux device handle of a CPP handle + * @cpp: CPP handle + * + * Return: Device for the NFP CPP bus + */ +struct device *nfp_cpp_device(struct nfp_cpp *cpp) +{ + return &cpp->dev; +} + +#define NFP_EXPL_OP(func, expl, args...) 
\ + ({ \ + struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \ + int err = -ENODEV; \ + \ + if (cpp->op->func) \ + err = cpp->op->func(expl, ##args); \ + err; \ + }) + +#define NFP_EXPL_OP_NR(func, expl, args...) \ + ({ \ + struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \ + \ + if (cpp->op->func) \ + cpp->op->func(expl, ##args); \ + \ + }) + +/** + * nfp_cpp_explicit_acquire() - Acquire explicit access handle + * @cpp: NFP CPP handle + * + * The 'data_ref' and 'signal_ref' values are useful when + * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values. + * + * Return: NFP CPP explicit handle + */ +struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp) +{ + struct nfp_cpp_explicit *expl; + int err; + + expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL); + if (!expl) + return NULL; + + expl->cpp = cpp; + err = NFP_EXPL_OP(explicit_acquire, expl); + if (err < 0) { + kfree(expl); + return NULL; + } + + return expl; +} + +/** + * nfp_cpp_explicit_set_target() - Set target fields for explicit + * @expl: Explicit handle + * @cpp_id: CPP ID field + * @len: CPP Length field + * @mask: CPP Mask field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, + u32 cpp_id, u8 len, u8 mask) +{ + expl->cmd.cpp_id = cpp_id; + expl->cmd.len = len; + expl->cmd.byte_mask = mask; + + return 0; +} + +/** + * nfp_cpp_explicit_set_data() - Set data fields for explicit + * @expl: Explicit handle + * @data_master: CPP Data Master field + * @data_ref: CPP Data Ref field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl, + u8 data_master, u16 data_ref) +{ + expl->cmd.data_master = data_master; + expl->cmd.data_ref = data_ref; + + return 0; +} + +/** + * nfp_cpp_explicit_set_signal() - Set signal fields for explicit + * @expl: Explicit handle + * @signal_master: CPP Signal Master field + * @signal_ref: CPP Signal Ref field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl, + u8 signal_master, u8 signal_ref) +{ + expl->cmd.signal_master = signal_master; + expl->cmd.signal_ref = signal_ref; + + return 0; +} + +/** + * nfp_cpp_explicit_set_posted() - Set completion fields for explicit + * @expl: Explicit handle + * @posted: True for signaled completion, false otherwise + * @siga: CPP Signal A field + * @siga_mode: CPP Signal A Mode field + * @sigb: CPP Signal B field + * @sigb_mode: CPP Signal B Mode field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted, + u8 siga, + enum nfp_cpp_explicit_signal_mode siga_mode, + u8 sigb, + enum nfp_cpp_explicit_signal_mode sigb_mode) +{ + expl->cmd.posted = posted; + expl->cmd.siga = siga; + expl->cmd.sigb = sigb; + expl->cmd.siga_mode = siga_mode; + expl->cmd.sigb_mode = sigb_mode; + + return 0; +} + +/** + * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access + * @expl: NFP CPP Explicit handle + * @buff: Data to have the target pull in the transaction + * @len: Length of data, in bytes + * + * The 'len' parameter must be less than or equal to 128 bytes. + * + * If this function is called before the configuration + * registers are set, it will return -EINVAL. 
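A sketch of a complete explicit transaction built from the helpers above (illustrative only, not part of this patch): a single posted 32-bit push (read), with cpp_id and addr as placeholders and the byte mask derived the same way as in nfp_cpp_explicit_read() later in this series.

static int example_explicit_read32(struct nfp_cpp *cpp, u32 cpp_id,
				   u64 addr, u32 *val)
{
	struct nfp_cpp_explicit *expl;
	int err;

	expl = nfp_cpp_explicit_acquire(cpp);
	if (!expl)
		return -EBUSY;

	/* One 32-bit word: the length field is word count minus one */
	nfp_cpp_explicit_set_target(expl, cpp_id, 0, 0x0f << (addr & 4));
	nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH,
				    0, NFP_SIGNAL_NONE);

	err = nfp_cpp_explicit_do(expl, addr);
	if (err >= 0)
		err = nfp_cpp_explicit_get(expl, val, sizeof(*val));

	nfp_cpp_explicit_release(expl);
	return err;
}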
+ * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len) +{ + return NFP_EXPL_OP(explicit_put, expl, buff, len); +} + +/** + * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete + * @expl: NFP CPP Explicit handle + * @address: Address to send in the explicit transaction + * + * If this function is called before the configuration + * registers are set, it will return -1, with an errno of EINVAL. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address) +{ + return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address); +} + +/** + * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access + * @expl: NFP CPP Explicit handle + * @buff: Data that the target pushed in the transaction + * @len: Length of data, in bytes + * + * The 'len' parameter must be less than or equal to 128 bytes. + * + * If this function is called before all three configuration + * registers are set, it will return -1, with an errno of EINVAL. + * + * If this function is called before nfp_cpp_explicit_do() + * has completed, it will return -1, with an errno of EBUSY. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len) +{ + return NFP_EXPL_OP(explicit_get, expl, buff, len); +} + +/** + * nfp_cpp_explicit_release() - Release explicit access handle + * @expl: NFP CPP Explicit handle + * + */ +void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl) +{ + NFP_EXPL_OP_NR(explicit_release, expl); + kfree(expl); +} + +/** + * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit + * @cpp_explicit: CPP explicit handle + * + * Return: NFP CPP handle of the explicit + */ +struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit) +{ + return cpp_explicit->cpp; +} + +/** + * nfp_cpp_explicit_priv() - return private struct for CPP explicit + * @cpp_explicit: CPP explicit handle + * + * Return: private data of the explicit, or NULL + */ +void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit) +{ + return &cpp_explicit[1]; +} + +/* THIS FUNCTION IS NOT EXPORTED */ +static u32 nfp_mutex_locked(u16 interface) +{ + return (u32)interface << 16 | 0x000f; +} + +static u32 nfp_mutex_unlocked(u16 interface) +{ + return (u32)interface << 16 | 0x0000; +} + +static bool nfp_mutex_is_locked(u32 val) +{ + return (val & 0xffff) == 0x000f; +} + +static bool nfp_mutex_is_unlocked(u32 val) +{ + return (val & 0xffff) == 0000; +} + +/* If you need more than 65536 recursive locks, please rethink your code. */ +#define MUTEX_DEPTH_MAX 0xffff + +static int +nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address) +{ + /* Not permitted on invalid interfaces */ + if (NFP_CPP_INTERFACE_TYPE_of(interface) == + NFP_CPP_INTERFACE_TYPE_INVALID) + return -EINVAL; + + /* Address must be 64-bit aligned */ + if (address & 7) + return -EINVAL; + + if (*target != NFP_CPP_TARGET_MU) + return -EINVAL; + + return 0; +} + +/** + * nfp_cpp_mutex_init() - Initialize a mutex location + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: Unique 32-bit value for this mutex + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). 
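A usage sketch of the mutex API (illustrative only, not part of this patch; the MU address and key are placeholders, and the location is assumed to have been initialized once with nfp_cpp_mutex_init()):

static int example_with_fw_lock(struct nfp_cpp *cpp)
{
	struct nfp_cpp_mutex *mutex;
	int err;

	/* 64-bit aligned location in the MU target, with a matching key */
	mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, 0x200000, 0xcafe);
	if (!mutex)
		return -EINVAL;

	err = nfp_cpp_mutex_lock(mutex);
	if (err)
		goto exit_free;

	/* ... critical section shared with firmware / other hosts ... */

	err = nfp_cpp_mutex_unlock(mutex);
exit_free:
	nfp_cpp_mutex_free(mutex);
	return err;
}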
+ * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, + int target, unsigned long long address, u32 key) +{ + const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + u16 interface = nfp_cpp_interface(cpp); + int err; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address + 4, key); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface)); + if (err) + return err; + + return 0; +} + +/** + * nfp_cpp_mutex_alloc() - Create a mutex handle + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: 32-bit unique key (must match the key at this location) + * + * The CPP target:address must point to a 64-bit aligned location, and + * reserve 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the + * MU Atomic Engine's CmpAndSwap32 command are supported. + * + * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + */ +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + u16 interface = nfp_cpp_interface(cpp); + struct nfp_cpp_mutex *mutex; + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return NULL; + + /* Look for mutex on cache list */ + list_for_each_entry(mutex, &cpp->mutex_cache, list) { + if (mutex->target == target && mutex->address == address) { + mutex->usage++; + return mutex; + } + } + + err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); + if (err < 0) + return NULL; + + if (tmp != key) + return NULL; + + mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return NULL; + + mutex->cpp = cpp; + mutex->target = target; + mutex->address = address; + mutex->key = key; + mutex->depth = 0; + mutex->usage = 1; + + /* Add mutex to cache list */ + list_add(&mutex->list, &cpp->mutex_cache); + + return mutex; +} + +/** + * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state + * @mutex: NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) +{ + if (--mutex->usage) + return; + + /* Remove mutex from cache */ + list_del(&mutex->list); + kfree(mutex); +} + +/** + * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) +{ + unsigned long warn_at = jiffies + 15 * HZ; + unsigned int timeout_ms = 1; + int err; + + /* We can't use a waitqueue here, because the unlocker + * might be on a separate CPU. + * + * So just wait for now. 
+ */ + for (;;) { + err = nfp_cpp_mutex_trylock(mutex); + if (err != -EBUSY) + break; + + err = msleep_interruptible(timeout_ms); + if (err != 0) + return -ERESTARTSYS; + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + 60 * HZ; + dev_warn(mutex->cpp->dev.parent, + "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n", + mutex->usage, mutex->depth, + mutex->target, mutex->address, mutex->key); + } + } + + return err; +} + +/** + * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value; + u16 interface; + int err; + + interface = nfp_cpp_interface(cpp); + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + if (value != nfp_mutex_locked(interface)) + return -EACCES; + + err = nfp_cpp_writel(cpp, muw, mutex->address, + nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + mutex->depth = 0; + return 0; +} + +/** + * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle + * @mutex: NFP CPP Mutex handle + * + * Return: 0 if the lock succeeded, -errno on failure + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value, tmp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == MUTEX_DEPTH_MAX) + return -E2BIG; + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + /* Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = nfp_mutex_locked(nfp_cpp_interface(cpp)); + + /* We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + return err; + + /* Was it unlocked? */ + if (nfp_mutex_is_unlocked(tmp)) { + /* The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + */ + + /* Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. 
+ */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + return err; + + mutex->depth = 1; + return 0; + } + + /* Already locked by us? Success! */ + if (tmp == value) { + mutex->depth = 1; + return 0; + } + + return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c new file mode 100644 index 000000000000..0ba0379b8f75 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -0,0 +1,281 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_cpplib.c + * Library of functions to access the NFP's CPP bus + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <asm/unaligned.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/sched.h> + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp6000/nfp_xpb.h" + +/* NFP6000 PL */ +#define NFP_PL_DEVICE_ID 0x00000004 +#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0) + +#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 + +/** + * nfp_cpp_readl() - Read a u32 word from a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 *value) +{ + u8 tmp[4]; + int err; + + err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + *value = get_unaligned_le32(tmp); + + return err; +} + +/** + * nfp_cpp_writel() - Write a u32 word to a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 value) +{ + u8 tmp[4]; + + put_unaligned_le32(value, tmp); + return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); +} + +/** + * nfp_cpp_readq() - Read a u64 word from a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Pointer to read buffer + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 *value) +{ + u8 tmp[8]; + int err; + + err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + *value = get_unaligned_le64(tmp); + + return err; +} + +/** + * nfp_cpp_writeq() - Write a u64 word to a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Value to write + * + * Return: length of the io, or -ERRNO + */ +int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 value) +{ + u8 tmp[8]; + + put_unaligned_le64(value, tmp); + return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); +} + +/* NOTE: This code should not use nfp_xpb_* functions, + * as those are model-specific + */ +int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model) +{ + const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); + u32 reg; + int err; + + err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model); + if (err < 0) + return err; + + /* The PL's PluDeviceID revision code is authoratative */ + *model &= ~0xff; + err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, + ®); + if (err < 0) + return err; + + *model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10; + + return 0; +} + +static u8 nfp_bytemask(int width, u64 addr) +{ + if (width == 8) + return 0xff; + else if (width == 4) + return 0x0f << (addr & 4); + else if (width == 2) + return 0x03 << (addr & 6); + else if (width == 1) + return 0x01 << (addr & 7); + else + return 0; +} + +int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, void *buff, size_t len, int width_read) +{ + struct 
nfp_cpp_explicit *expl; + char *tmp = buff; + int err, i, incr; + u8 byte_mask; + + if (len & (width_read - 1)) + return -EINVAL; + + expl = nfp_cpp_explicit_acquire(cpp); + if (!expl) + return -EBUSY; + + incr = min_t(int, 16 * width_read, 128); + incr = min_t(int, incr, len); + + /* Translate a NFP_CPP_ACTION_RW to action 0 */ + if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW) + cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 0, + NFP_CPP_ID_TOKEN_of(cpp_id)); + + byte_mask = nfp_bytemask(width_read, addr); + + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_read - 1, byte_mask); + nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH, + 0, NFP_SIGNAL_NONE); + + for (i = 0; i < len; i += incr, addr += incr, tmp += incr) { + if (i + incr > len) { + incr = len - i; + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_read - 1, + 0xff); + } + + err = nfp_cpp_explicit_do(expl, addr); + if (err < 0) + goto exit_release; + + err = nfp_cpp_explicit_get(expl, tmp, incr); + if (err < 0) + goto exit_release; + } + err = len; +exit_release: + nfp_cpp_explicit_release(expl); + + return err; +} + +int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, + const void *buff, size_t len, int width_write) +{ + struct nfp_cpp_explicit *expl; + const char *tmp = buff; + int err, i, incr; + u8 byte_mask; + + if (len & (width_write - 1)) + return -EINVAL; + + expl = nfp_cpp_explicit_acquire(cpp); + if (!expl) + return -EBUSY; + + incr = min_t(int, 16 * width_write, 128); + incr = min_t(int, incr, len); + + /* Translate a NFP_CPP_ACTION_RW to action 1 */ + if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW) + cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 1, + NFP_CPP_ID_TOKEN_of(cpp_id)); + + byte_mask = nfp_bytemask(width_write, addr); + + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_write - 1, byte_mask); + nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PULL, + 0, NFP_SIGNAL_NONE); + + for (i = 0; i < len; i += incr, addr += incr, tmp += incr) { + if (i + incr > len) { + incr = len - i; + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_write - 1, + 0xff); + } + + err = nfp_cpp_explicit_put(expl, tmp, incr); + if (err < 0) + goto exit_release; + + err = nfp_cpp_explicit_do(expl, addr); + if (err < 0) + goto exit_release; + } + err = len; +exit_release: + nfp_cpp_explicit_release(expl); + + return err; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c new file mode 100644 index 000000000000..8d8f311ffa6e --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ -0,0 +1,318 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM + * after chip reset. + * + * Examples of the fields: + * me.count = 40 + * me.mask = 0x7f_ffff_ffff + * + * me.count is the total number of MEs on the system. + * me.mask is the bitmask of MEs that are available for application usage. + * + * (ie, in this example, ME 39 has been reserved by boardconfig.) + */ + +#include <asm/byteorder.h> +#include <asm/unaligned.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#define NFP_SUBSYS "nfp_hwinfo" + +#include "crc32.h" +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define HWINFO_SIZE_MIN 0x100 +#define HWINFO_WAIT 20 /* seconds */ + +/* The Hardware Info Table defines the properties of the system. + * + * HWInfo v1 Table (fixed size) + * + * 0x0000: u32 version Hardware Info Table version (1.0) + * 0x0004: u32 size Total size of the table, including + * the CRC32 (IEEE 802.3) + * 0x0008: u32 jumptab Offset of key/value table + * 0x000c: u32 keys Total number of keys in the key/value table + * NNNNNN: Key/value jump table and string data + * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * HWInfo v2 Table (variable size) + * + * 0x0000: u32 version Hardware Info Table version (2.0) + * 0x0004: u32 size Current size of the data area, excluding CRC32 + * 0x0008: u32 limit Maximum size of the table + * 0x000c: u32 reserved Unused, set to zero + * NNNNNN: Key/value data + * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * If the HWInfo table is in the process of being updated, the low bit + * of version will be set. + * + * HWInfo v1 Key/Value Table + * ------------------------- + * + * The key/value table is a set of offsets to ASCIIZ strings which have + * been strcmp(3) sorted (yes, please use bsearch(3) on the table). + * + * All keys are guaranteed to be unique. + * + * N+0: u32 key_1 Offset to the first key + * N+4: u32 val_1 Offset to the first value + * N+8: u32 key_2 Offset to the second key + * N+c: u32 val_2 Offset to the second value + * ... + * + * HWInfo v2 Key/Value Table + * ------------------------- + * + * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' + * + * Unsorted. 
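A sketch of how the packed v2 key/value data described above is walked (illustrative only, not part of this patch; the real lookup is nfp_hwinfo_lookup() later in this file):

static const char *
example_kv_find(const char *data, const char *end, const char *name)
{
	const char *key, *val;

	/* 'key1\0value1\0key2\0value2\0...', unsorted, so scan linearly */
	for (key = data; *key && key < end; key = val + strlen(val) + 1) {
		val = key + strlen(key) + 1;
		if (!strcmp(key, name))
			return val;
	}

	return NULL;	/* e.g. looking up "me.count" would return "40" */
}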
+ */ + +#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_UPDATING BIT(0) + +struct nfp_hwinfo { + u8 start[0]; + + __le32 version; + __le32 size; + + /* v2 specific fields */ + __le32 limit; + __le32 resv; + + char data[]; +}; + +static bool nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo) +{ + return le32_to_cpu(hwinfo->version) & NFP_HWINFO_VERSION_UPDATING; +} + +static int +hwinfo_db_walk(struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, u32 size) +{ + const char *key, *val, *end = hwinfo->data + size; + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + + val = key + strlen(key) + 1; + if (val >= end) { + nfp_warn(cpp, "Bad HWINFO - overflowing key\n"); + return -EINVAL; + } + + if (val + strlen(val) + 1 > end) { + nfp_warn(cpp, "Bad HWINFO - overflowing value\n"); + return -EINVAL; + } + } + + return 0; +} + +static int +hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len) +{ + u32 size, crc; + + size = le32_to_cpu(db->size); + if (size > len) { + nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len); + return -EINVAL; + } + + size -= sizeof(u32); + crc = crc32_posix(db, size); + if (crc != get_unaligned_le32(db->start + size)) { + nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n", + crc, get_unaligned_le32(db->start + size)); + + return -EINVAL; + } + + return hwinfo_db_walk(cpp, db, size); +} + +static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) +{ + struct nfp_hwinfo *header; + struct nfp_resource *res; + u64 cpp_addr; + u32 cpp_id; + int err; + u8 *db; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); + if (!IS_ERR(res)) { + cpp_id = nfp_resource_cpp_id(res); + cpp_addr = nfp_resource_address(res); + *cpp_size = nfp_resource_size(res); + + nfp_resource_release(res); + + if (*cpp_size < HWINFO_SIZE_MIN) + return -ENOENT; + } else if (PTR_ERR(res) == -ENOENT) { + /* Try getting the HWInfo table from the 'classic' location */ + cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, + NFP_CPP_ACTION_RW, 0, 1); + cpp_addr = 0x30000; + *cpp_size = 0x0e000; + } else { + return PTR_ERR(res); + } + + db = kmalloc(*cpp_size + 1, GFP_KERNEL); + if (!db) + return -ENOMEM; + + err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); + if (err != *cpp_size) { + kfree(db); + return err < 0 ? 
err : -EIO; + } + + header = (void *)db; + if (nfp_hwinfo_is_updating(header)) { + kfree(db); + return -EBUSY; + } + + if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) { + nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n", + le32_to_cpu(header->version)); + kfree(db); + return -EINVAL; + } + + /* NULL-terminate for safety */ + db[*cpp_size] = '\0'; + + nfp_hwinfo_cache_set(cpp, db); + + return 0; +} + +static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) +{ + const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ; + int err; + + for (;;) { + const unsigned long start_time = jiffies; + + err = hwinfo_try_fetch(cpp, hwdb_size); + if (!err) + return 0; + + err = msleep_interruptible(100); + if (err || time_after(start_time, wait_until)) { + nfp_err(cpp, "NFP access error\n"); + return -EIO; + } + } +} + +static int nfp_hwinfo_load(struct nfp_cpp *cpp) +{ + struct nfp_hwinfo *db; + size_t hwdb_size = 0; + int err; + + err = hwinfo_fetch(cpp, &hwdb_size); + if (err) + return err; + + db = nfp_hwinfo_cache(cpp); + err = hwinfo_db_validate(cpp, db, hwdb_size); + if (err) { + kfree(db); + nfp_hwinfo_cache_set(cpp, NULL); + return err; + } + + return 0; +} + +/** + * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name + * @cpp: NFP CPP handle + * @lookup: HWInfo name to search for + * + * Return: Value of the HWInfo name, or NULL + */ +const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup) +{ + const char *key, *val, *end; + struct nfp_hwinfo *hwinfo; + int err; + + hwinfo = nfp_hwinfo_cache(cpp); + if (!hwinfo) { + err = nfp_hwinfo_load(cpp); + if (err) + return NULL; + hwinfo = nfp_hwinfo_cache(cpp); + } + + if (!hwinfo || !lookup) + return NULL; + + end = hwinfo->data + le32_to_cpu(hwinfo->size) - sizeof(u32); + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + + val = key + strlen(key) + 1; + + if (strcmp(key, lookup) == 0) + return val; + } + + return NULL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c new file mode 100644 index 000000000000..3d15dd03647e --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_mip.c + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Espen Skoglund <espen.skoglund@netronome.com> + */ +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" + +#define NFP_MIP_SIGNATURE cpu_to_le32(0x0050494d) /* "MIP\0" */ +#define NFP_MIP_VERSION cpu_to_le32(1) +#define NFP_MIP_MAX_OFFSET (256 * 1024) + +struct nfp_mip { + __le32 signature; + __le32 mip_version; + __le32 mip_size; + __le32 first_entry; + + __le32 version; + __le32 buildnum; + __le32 buildtime; + __le32 loadtime; + + __le32 symtab_addr; + __le32 symtab_size; + __le32 strtab_addr; + __le32 strtab_size; + + char name[16]; + char toolchain[32]; +}; + +/* Read memory and check if it could be a valid MIP */ +static int +nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip) +{ + int ret; + + ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); + if (ret != sizeof(*mip)) { + nfp_err(cpp, "Failed to read MIP data (%d, %zu)\n", + ret, sizeof(*mip)); + return -EIO; + } + if (mip->signature != NFP_MIP_SIGNATURE) { + nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n", + le32_to_cpu(mip->signature)); + return -EINVAL; + } + if (mip->mip_version != NFP_MIP_VERSION) { + nfp_warn(cpp, "Unsupported MIP version (%d)\n", + le32_to_cpu(mip->mip_version)); + return -EINVAL; + } + + return 0; +} + +/* Try to locate MIP using the resource table */ +static int nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip) +{ + struct nfp_nffw_info *nffw_info; + u32 cpp_id; + u64 addr; + int err; + + nffw_info = nfp_nffw_info_open(cpp); + if (IS_ERR(nffw_info)) + return PTR_ERR(nffw_info); + + err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr); + if (err) + goto exit_close_nffw; + + err = nfp_mip_try_read(cpp, cpp_id, addr, mip); +exit_close_nffw: + nfp_nffw_info_close(nffw_info); + return err; +} + +/** + * nfp_mip_open() - Get device MIP structure + * @cpp: NFP CPP Handle + * + * Copy MIP structure from NFP device and return it. The returned + * structure is handled internally by the library and should be + * freed by calling nfp_mip_close(). + * + * Return: pointer to mip, NULL on failure. 
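+ *
+ * Editorial usage sketch, not part of this patch (error handling trimmed,
+ * only the helpers declared in nfp_nffw.h are assumed):
+ *
+ *	const struct nfp_mip *mip = nfp_mip_open(cpp);
+ *	u32 sym_addr, sym_size;
+ *
+ *	if (mip) {
+ *		nfp_mip_symtab(mip, &sym_addr, &sym_size);
+ *		nfp_mip_close(mip);
+ *	}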
+ */ +const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp) +{ + struct nfp_mip *mip; + int err; + + mip = kmalloc(sizeof(*mip), GFP_KERNEL); + if (!mip) + return NULL; + + err = nfp_mip_read_resource(cpp, mip); + if (err) { + kfree(mip); + return NULL; + } + + return mip; +} + +void nfp_mip_close(const struct nfp_mip *mip) +{ + kfree(mip); +} + +/** + * nfp_mip_symtab() - Get the address and size of the MIP symbol table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol table + * @size: Location for size of MIP symbol table + */ +void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size) +{ + *addr = le32_to_cpu(mip->symtab_addr); + *size = le32_to_cpu(mip->symtab_size); +} + +/** + * nfp_mip_strtab() - Get the address and size of the MIP symbol name table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol name table + * @size: Location for size of MIP symbol name table + */ +void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size) +{ + *addr = le32_to_cpu(mip->strtab_addr); + *size = le32_to_cpu(mip->strtab_size); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c new file mode 100644 index 000000000000..cd34097b79f1 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_nffw.c + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Francois H. Theron <francois.theron@netronome.com> + */ + +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp6000/nfp6000.h" + +/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4. + * Lower IDs are reserved for target and loader IDs. + */ +#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. 
*/ +#define NFFW_FWID_BASE 4 + +#define NFFW_FWID_ALL 255 + +/** + * NFFW_INFO_VERSION history: + * 0: This was never actually used (before versioning), but it refers to + * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later + * changed to 200. + * 1: First versioned struct, with + * FWINFO_CNT = 120 + * MEINFO_CNT = 120 + * 2: FWINFO_CNT = 200 + * MEINFO_CNT = 200 + */ +#define NFFW_INFO_VERSION_CURRENT 2 + +/* Enough for all current chip families */ +#define NFFW_MEINFO_CNT_V1 120 +#define NFFW_FWINFO_CNT_V1 120 +#define NFFW_MEINFO_CNT_V2 200 +#define NFFW_FWINFO_CNT_V2 200 + +/* Work in 32-bit words to make cross-platform endianness easier to handle */ + +/** nfp.nffw meinfo **/ +struct nffw_meinfo { + __le32 ctxmask__fwid__meid; +}; + +struct nffw_fwinfo { + __le32 loaded__mu_da__mip_off_hi; + __le32 mip_cppid; /* 0 means no MIP */ + __le32 mip_offset_lo; +}; + +struct nfp_nffw_info_v1 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1]; +}; + +struct nfp_nffw_info_v2 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2]; +}; + +/** Resource: nfp.nffw main **/ +struct nfp_nffw_info_data { + __le32 flags[2]; + union { + struct nfp_nffw_info_v1 v1; + struct nfp_nffw_info_v2 v2; + } info; +}; + +struct nfp_nffw_info { + struct nfp_cpp *cpp; + struct nfp_resource *res; + + struct nfp_nffw_info_data fwinf; +}; + +/* flg_info_version = flags[0]<27:16> + * This is a small version counter intended only to detect if the current + * implementation can read the current struct. Struct changes should be very + * rare and as such a 12-bit counter should cover large spans of time. By the + * time it wraps around, we don't expect to have 4096 versions of this struct + * to be in use at the same time. 
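+ *
+ * Editorial worked example, not part of this patch: flags[0] = 0x00020001
+ * decodes as flg_info_version = 2 (bits <27:16>) with flg_init (bit 0) set.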
+ */ +static u32 nffw_res_info_version_get(const struct nfp_nffw_info_data *res) +{ + return (le32_to_cpu(res->flags[0]) >> 16) & 0xfff; +} + +/* flg_init = flags[0]<0> */ +static u32 nffw_res_flg_init_get(const struct nfp_nffw_info_data *res) +{ + return (le32_to_cpu(res->flags[0]) >> 0) & 1; +} + +/* loaded = loaded__mu_da__mip_off_hi<31:31> */ +static u32 nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi) +{ + return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 31) & 1; +} + +/* mip_cppid = mip_cppid */ +static u32 nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi) +{ + return le32_to_cpu(fi->mip_cppid); +} + +/* loaded = loaded__mu_da__mip_off_hi<8:8> */ +static u32 nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi) +{ + return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 8) & 1; +} + +/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 8) | mip_offset_lo */ +static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) +{ + u64 mip_off_hi = le32_to_cpu(fi->loaded__mu_da__mip_off_hi); + + return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo); +} + +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + u32 xpbaddr, imbcppat; + int err; + + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; + err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); + if (err < 0) + return err; + + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + return nfp_cppat_mu_locality_lsb(mode, addr40); +} + +static unsigned int +nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) +{ + /* For the this code, version 0 is most likely to be + * version 1 in this case. Since the kernel driver + * does not take responsibility for initialising the + * nfp.nffw resource, any previous code (CA firmware or + * userspace) that left the version 0 and did set + * the init flag is going to be version 1. 
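+ *
+ * Editorial note, not part of this patch: the switch below therefore maps
+ * versions 0 and 1 to the 120-entry v1 layout, version 2 to the 200-entry
+ * v2 layout, and anything newer to an empty array so callers fail safely.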
+ */ + switch (nffw_res_info_version_get(fwinf)) { + case 0: + case 1: + *arr = &fwinf->info.v1.fwinfo[0]; + return NFFW_FWINFO_CNT_V1; + case 2: + *arr = &fwinf->info.v2.fwinfo[0]; + return NFFW_FWINFO_CNT_V2; + default: + *arr = NULL; + return 0; + } +} + +/** + * nfp_nffw_info_open() - Acquire the lock on the NFFW table + * @cpp: NFP CPP handle + * + * Return: 0, or -ERRNO + */ +struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) +{ + struct nfp_nffw_info_data *fwinf; + struct nfp_nffw_info *state; + u32 info_ver; + int err; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW); + if (IS_ERR(state->res)) + goto err_free; + + fwinf = &state->fwinf; + + if (sizeof(*fwinf) > nfp_resource_size(state->res)) + goto err_release; + + err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), + nfp_resource_address(state->res), + fwinf, sizeof(*fwinf)); + if (err < sizeof(*fwinf)) + goto err_release; + + if (!nffw_res_flg_init_get(fwinf)) + goto err_release; + + info_ver = nffw_res_info_version_get(fwinf); + if (info_ver > NFFW_INFO_VERSION_CURRENT) + goto err_release; + + state->cpp = cpp; + return state; + +err_release: + nfp_resource_release(state->res); +err_free: + kfree(state); + return ERR_PTR(-EIO); +} + +/** + * nfp_nffw_info_release() - Release the lock on the NFFW table + * @state: NFP FW info state + * + * Return: 0, or -ERRNO + */ +void nfp_nffw_info_close(struct nfp_nffw_info *state) +{ + nfp_resource_release(state->res); + kfree(state); +} + +/** + * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW + * @state: NFP FW info state + * + * Return: First NFFW firmware info, NULL on failure + */ +static struct nffw_fwinfo *nfp_nffw_info_fwid_first(struct nfp_nffw_info *state) +{ + struct nffw_fwinfo *fwinfo; + unsigned int cnt, i; + + cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo); + if (!cnt) + return NULL; + + for (i = 0; i < cnt; i++) + if (nffw_fwinfo_loaded_get(&fwinfo[i])) + return &fwinfo[i]; + + return NULL; +} + +/** + * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP + * @state: NFP FW info state + * @cpp_id: Pointer to the CPP ID of the MIP + * @off: Pointer to the CPP Address of the MIP + * + * Return: 0, or -ERRNO + */ +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off) +{ + struct nffw_fwinfo *fwinfo; + + fwinfo = nfp_nffw_info_fwid_first(state); + if (!fwinfo) + return -EINVAL; + + *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo); + *off = nffw_fwinfo_mip_offset_get(fwinfo); + + if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { + int locality_off; + + if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) + return 0; + + locality_off = nfp_mip_mu_locality_lsb(state->cpp); + if (locality_off < 0) + return locality_off; + + *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + } + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h new file mode 100644 index 000000000000..988badd230d1 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. 
You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_nffw.h + * Authors: Jason McMullan <jason.mcmullan@netronome.com> + * Francois H. Theron <francois.theron@netronome.com> + */ + +#ifndef NFP_NFFW_H +#define NFP_NFFW_H + +/* Implemented in nfp_nffw.c */ + +struct nfp_nffw_info; + +struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp); +void nfp_nffw_info_close(struct nfp_nffw_info *state); +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off); + +/* Implemented in nfp_mip.c */ + +struct nfp_mip; + +const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp); +void nfp_mip_close(const struct nfp_mip *mip); + +void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size); +void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size); + +/* Implemented in nfp_rtsym.c */ + +#define NFP_RTSYM_TYPE_NONE 0 +#define NFP_RTSYM_TYPE_OBJECT 1 +#define NFP_RTSYM_TYPE_FUNCTION 2 +#define NFP_RTSYM_TYPE_ABS 3 + +#define NFP_RTSYM_TARGET_NONE 0 +#define NFP_RTSYM_TARGET_LMEM -1 +#define NFP_RTSYM_TARGET_EMU_CACHE -7 + +/** + * struct nfp_rtsym - RTSYM descriptor + * @name: Symbol name + * @addr: Address in the domain/target's address space + * @size: Size (in bytes) of the symbol + * @type: NFP_RTSYM_TYPE_* of the symbol + * @target: CPP Target identifier, or NFP_RTSYM_TARGET_* + * @domain: CPP Target Domain (island) + */ +struct nfp_rtsym { + const char *name; + u64 addr; + u64 size; + int type; + int target; + int domain; +}; + +int nfp_rtsym_count(struct nfp_cpp *cpp); +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx); +const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name); +u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error); + +#endif /* NFP_NFFW_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c new file mode 100644 index 000000000000..34c50987c377 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -0,0 +1,426 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. 
+ * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_nsp.c + * Author: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#define NFP_SUBSYS "nfp_nsp" + +#include "nfp.h" +#include "nfp_cpp.h" + +/* Offsets relative to the CSR base */ +#define NSP_STATUS 0x00 +#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) +#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44) +#define NSP_STATUS_MINOR GENMASK_ULL(43, 32) +#define NSP_STATUS_CODE GENMASK_ULL(31, 16) +#define NSP_STATUS_RESULT GENMASK_ULL(15, 8) +#define NSP_STATUS_BUSY BIT_ULL(0) + +#define NSP_COMMAND 0x08 +#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) +#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_START BIT_ULL(0) + +/* CPP address to retrieve the data from */ +#define NSP_BUFFER 0x10 +#define NSP_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) + +#define NSP_DFLT_BUFFER 0x18 + +#define NSP_DFLT_BUFFER_CONFIG 0x20 +#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) + +#define NSP_MAGIC 0xab10 +#define NSP_MAJOR 0 +#define NSP_MINOR (__MAX_SPCODE - 1) + +#define NSP_CODE_MAJOR GENMASK(15, 12) +#define NSP_CODE_MINOR GENMASK(11, 0) + +enum nfp_nsp_cmd { + SPCODE_NOOP = 0, /* No operation */ + SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ + SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */ + SPCODE_PHY_INIT = 3, /* Initialize the PHY */ + SPCODE_MAC_INIT = 4, /* Initialize the MAC */ + SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */ + SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ + SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ + SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + + __MAX_SPCODE, +}; + +struct nfp_nsp { + struct nfp_cpp *cpp; + struct nfp_resource *res; + struct { + u16 major; + u16 minor; + } ver; +}; + +static int nfp_nsp_check(struct nfp_nsp *state) +{ + struct nfp_cpp *cpp = state->cpp; + u64 nsp_status, reg; + u32 nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_status = nfp_resource_address(state->res) + NSP_STATUS; + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, ®); + if (err < 0) + return err; + + if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) { + nfp_err(cpp, "Cannot detect NFP Service Processor\n"); + 
return -ENODEV; + } + + state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); + state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); + + if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) { + nfp_err(cpp, "Unsupported ABI %hu.%hu\n", + state->ver.major, state->ver.minor); + return -EINVAL; + } + + if (reg & NSP_STATUS_BUSY) { + nfp_err(cpp, "Service processor busy!\n"); + return -EBUSY; + } + + return 0; +} + +/** + * nfp_nsp_open() - Prepare for communication and lock the NSP resource. + * @cpp: NFP CPP Handle + */ +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp) +{ + struct nfp_resource *res; + struct nfp_nsp *state; + int err; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); + if (IS_ERR(res)) + return (void *)res; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) { + nfp_resource_release(res); + return ERR_PTR(-ENOMEM); + } + state->cpp = cpp; + state->res = res; + + err = nfp_nsp_check(state); + if (err) { + nfp_nsp_close(state); + return ERR_PTR(err); + } + + return state; +} + +/** + * nfp_nsp_close() - Clean up and unlock the NSP resource. + * @state: NFP SP state + */ +void nfp_nsp_close(struct nfp_nsp *state) +{ + nfp_resource_release(state->res); + kfree(state); +} + +u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state) +{ + return state->ver.major; +} + +u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state) +{ + return state->ver.minor; +} + +static int +nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, + u32 nsp_cpp, u64 addr, u64 mask, u64 val) +{ + const unsigned long wait_until = jiffies + 30 * HZ; + int err; + + for (;;) { + const unsigned long start_time = jiffies; + + err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg); + if (err < 0) + return err; + + if ((*reg & mask) == val) + return 0; + + err = msleep_interruptible(100); + if (err) + return err; + + if (time_after(start_time, wait_until)) + return -ETIMEDOUT; + } +} + +/** + * nfp_nsp_command() - Execute a command on the NFP Service Processor + * @state: NFP SP state + * @code: NFP SP Command Code + * @option: NFP SP Command Argument + * @buff_cpp: NFP SP Buffer CPP Address info + * @buff_addr: NFP SP Buffer Host address + * + * Return: 0 for success with no result + * + * 1..255 for NSP completion with a result code + * + * -EAGAIN if the NSP is not yet present + * -ENODEV if the NSP is not a supported model + * -EBUSY if the NSP is stuck + * -EINTR if interrupted while waiting for completion + * -ETIMEDOUT if the NSP took longer than 30 seconds to complete + */ +static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, + u32 buff_cpp, u64 buff_addr) +{ + u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command; + struct nfp_cpp *cpp = state->cpp; + u32 nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_base = nfp_resource_address(state->res); + nsp_status = nsp_base + NSP_STATUS; + nsp_command = nsp_base + NSP_COMMAND; + nsp_buffer = nsp_base + NSP_BUFFER; + + err = nfp_nsp_check(state); + if (err) + return err; + + if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { + nfp_err(cpp, "Host buffer out of reach %08x %016llx\n", + buff_cpp, buff_addr); + return -EINVAL; + } + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, + FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + if (err < 0) + return err; + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, + FIELD_PREP(NSP_COMMAND_OPTION, option) | + FIELD_PREP(NSP_COMMAND_CODE, code) | + FIELD_PREP(NSP_COMMAND_START, 1)); + if 
(err < 0) + return err; + + /* Wait for NSP_COMMAND_START to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, + nsp_cpp, nsp_command, NSP_COMMAND_START, 0); + if (err) { + nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n", + err, code); + return err; + } + + /* Wait for NSP_STATUS_BUSY to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, + nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0); + if (err) { + nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n", + err, code); + return err; + } + + err = FIELD_GET(NSP_STATUS_RESULT, reg); + if (err) { + nfp_warn(cpp, "Result (error) code set: %d command: %d\n", + -err, code); + return -err; + } + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, ®); + if (err < 0) + return err; + + return FIELD_GET(NSP_COMMAND_OPTION, reg); +} + +static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, + const void *in_buf, unsigned int in_size, + void *out_buf, unsigned int out_size) +{ + struct nfp_cpp *cpp = nsp->cpp; + unsigned int max_size; + u64 reg, cpp_buf; + int ret, err; + u32 cpp_id; + + if (nsp->ver.minor < 13) { + nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n", + code, nsp->ver.major, nsp->ver.minor); + return -EOPNOTSUPP; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER_CONFIG, + ®); + if (err < 0) + return err; + + max_size = max(in_size, out_size); + if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { + nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n", + code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, + max_size); + return -EINVAL; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER, + ®); + if (err < 0) + return err; + + cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; + cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); + + if (in_buf && in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (err < 0) + return err; + } + + ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf); + if (ret < 0) + return ret; + + if (out_buf && out_size) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (err < 0) + return err; + } + + return ret; +} + +int nfp_nsp_wait(struct nfp_nsp *state) +{ + const unsigned long wait_until = jiffies + 30 * HZ; + int err; + + nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n"); + + for (;;) { + const unsigned long start_time = jiffies; + + err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + if (err != -EAGAIN) + break; + + err = msleep_interruptible(100); + if (err) + break; + + if (time_after(start_time, wait_until)) { + err = -ETIMEDOUT; + break; + } + } + if (err) + nfp_err(state->cpp, "NSP failed to respond %d\n", err); + + return err; +} + +int nfp_nsp_device_soft_reset(struct nfp_nsp *state) +{ + int err; + + err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); + + nfp_nffw_cache_flush(state->cpp); + + return err; +} + +int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw) +{ + return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data, + fw->size, NULL, 0); +} + +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, + buf, size); +} + +int nfp_nsp_write_eth_table(struct nfp_nsp *state, + const void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, + NULL, 0); +} diff 
--git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c new file mode 100644 index 000000000000..1ece1f8ae4b3 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* Authors: David Brunecz <david.brunecz@netronome.com> + * Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason Mcmullan <jason.mcmullan@netronome.com> + */ + +#include <linux/bitfield.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include "nfp.h" +#include "nfp_nsp_eth.h" +#include "nfp6000/nfp6000.h" + +#define NSP_ETH_NBI_PORT_COUNT 24 +#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) +#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \ + sizeof(struct eth_table_entry)) + +#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) +#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) +#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48) +#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) + +#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES) + +#define NSP_ETH_STATE_ENABLED BIT_ULL(1) +#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) + +#define NSP_ETH_CTRL_ENABLED BIT_ULL(1) +#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) + +enum nfp_eth_rate { + RATE_INVALID = 0, + RATE_10M, + RATE_100M, + RATE_1G, + RATE_10G, + RATE_25G, +}; + +struct eth_table_entry { + __le64 port; + __le64 state; + u8 mac_addr[6]; + u8 resv[2]; + __le64 control; +}; + +static unsigned int nfp_eth_rate(enum nfp_eth_rate rate) +{ + unsigned int rate_xlate[] = { + [RATE_INVALID] = 0, + [RATE_10M] = SPEED_10, + [RATE_100M] = SPEED_100, + [RATE_1G] = SPEED_1000, + [RATE_10G] = SPEED_10000, + [RATE_25G] = SPEED_25000, + }; + + if (rate >= ARRAY_SIZE(rate_xlate)) + return 0; + + return rate_xlate[rate]; +} + +static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[ETH_ALEN - i - 1] = src[i]; +} + 
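+/* Editorial note, not part of this patch: the NSP stores the MAC address
+ * with its bytes in reverse order, so a table entry holding
+ * {0x56, 0x34, 0x12, 0x4d, 0x15, 0x00} comes out of the helper above as
+ * the conventional 00:15:4d:12:34:56.
+ */
+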
+static void +nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index, + struct nfp_eth_table_port *dst) +{ + unsigned int rate; + u64 port, state; + + port = le64_to_cpu(src->port); + state = le64_to_cpu(src->state); + + dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port); + dst->index = index; + dst->nbi = index / NSP_ETH_NBI_PORT_COUNT; + dst->base = index % NSP_ETH_NBI_PORT_COUNT; + dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port); + + dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state); + dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); + dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); + + rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state)); + dst->speed = dst->lanes * rate; + + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); + + snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu", + FIELD_GET(NSP_ETH_PORT_PHYLABEL, port), + FIELD_GET(NSP_ETH_PORT_LABEL, port)); +} + +/** + * nfp_eth_read_ports() - retrieve port information + * @cpp: NFP CPP handle + * + * Read the port information from the device. Returned structure should + * be freed with kfree() once no longer needed. + * + * Return: populated ETH table or NULL on error. + */ +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp) +{ + struct nfp_eth_table *ret; + struct nfp_nsp *nsp; + + nsp = nfp_nsp_open(cpp); + if (IS_ERR(nsp)) + return NULL; + + ret = __nfp_eth_read_ports(cpp, nsp); + nfp_nsp_close(nsp); + + return ret; +} + +struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) +{ + struct eth_table_entry *entries; + struct nfp_eth_table *table; + unsigned int cnt; + int i, j, ret; + + entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); + if (!entries) + return NULL; + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + nfp_err(cpp, "reading port table failed %d\n", ret); + kfree(entries); + return NULL; + } + + /* Some versions of flash will give us 0 instead of port count */ + cnt = ret; + if (!cnt) { + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + cnt++; + } + + table = kzalloc(sizeof(*table) + + sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL); + if (!table) { + kfree(entries); + return NULL; + } + + table->count = cnt; + for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + nfp_eth_port_translate(&entries[i], i, + &table->ports[j++]); + + kfree(entries); + + return table; +} + +/** + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: 0 or -ERRNO. 
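+ *
+ * Editorial usage sketch, not part of this patch: disabling chip-wide
+ * port 0 is a single call, e.g.
+ *
+ *	err = nfp_eth_set_mod_enable(cpp, 0, false);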
+ */ +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) +{ + struct eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + int ret; + + entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); + if (!entries) + return -ENOMEM; + + nsp = nfp_nsp_open(cpp); + if (IS_ERR(nsp)) { + kfree(entries); + return PTR_ERR(nsp); + } + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + nfp_err(cpp, "reading port table failed %d\n", ret); + goto exit_close_nsp; + } + + if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { + nfp_warn(cpp, "trying to set port state on disabled port %d\n", + idx); + ret = -EINVAL; + goto exit_close_nsp; + } + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].state); + if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + ret = 0; + goto exit_close_nsp; + } + + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = cpu_to_le64(reg); + + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); +exit_close_nsp: + nfp_nsp_close(nsp); + kfree(entries); + + return ret < 0 ? ret : 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h new file mode 100644 index 000000000000..edf703d319c8 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NSP_NSP_ETH_H +#define NSP_NSP_ETH_H 1 + +#include <linux/types.h> +#include <linux/if_ether.h> + +/** + * struct nfp_eth_table - ETH table information + * @count: number of table entries + * @ports: table of ports + * + * @eth_index: port index according to legacy ethX numbering + * @index: chip-wide first channel index + * @nbi: NBI index + * @base: first channel index (within NBI) + * @lanes: number of channels + * @speed: interface speed (in Mbps) + * @mac_addr: interface MAC address + * @label: interface id string + * @enabled: is enabled? + * @tx_enabled: is TX enabled? + * @rx_enabled: is RX enabled? 
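+ *
+ * Editorial usage sketch, not part of this patch (assumes only the
+ * nfp_eth_read_ports() helper declared below):
+ *
+ *	struct nfp_eth_table *eth_tbl = nfp_eth_read_ports(cpp);
+ *	unsigned int i;
+ *
+ *	if (eth_tbl) {
+ *		for (i = 0; i < eth_tbl->count; i++)
+ *			pr_info("%s: %u Mbps over %u lanes\n",
+ *				eth_tbl->ports[i].label,
+ *				eth_tbl->ports[i].speed,
+ *				eth_tbl->ports[i].lanes);
+ *		kfree(eth_tbl);
+ *	}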
+ */ +struct nfp_eth_table { + unsigned int count; + struct nfp_eth_table_port { + unsigned int eth_index; + unsigned int index; + unsigned int nbi; + unsigned int base; + unsigned int lanes; + unsigned int speed; + + u8 mac_addr[ETH_ALEN]; + char label[8]; + + bool enabled; + bool tx_enabled; + bool rx_enabled; + } ports[0]; +}; + +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); +struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable); + +#endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c new file mode 100644 index 000000000000..a2850344f8b4 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -0,0 +1,279 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_resource.c + * Author: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + */ +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "crc32.h" +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define NFP_RESOURCE_ENTRY_NAME_SZ 8 + +/** + * struct nfp_resource_entry - Resource table entry + * @owner: NFP CPP Lock, interface owner + * @key: NFP CPP Lock, posix_crc32(name, 8) + * @region: Memory region descriptor + * @name: ASCII, zero padded name + * @reserved + * @cpp_action: CPP Action + * @cpp_token: CPP Token + * @cpp_target: CPP Target ID + * @page_offset: 256-byte page offset into target's CPP address + * @page_size: size, in 256-byte pages + */ +struct nfp_resource_entry { + struct nfp_resource_entry_mutex { + u32 owner; + u32 key; + } mutex; + struct nfp_resource_entry_region { + u8 name[NFP_RESOURCE_ENTRY_NAME_SZ]; + u8 reserved[5]; + u8 cpp_action; + u8 cpp_token; + u8 cpp_target; + u32 page_offset; + u32 page_size; + } region; +}; + +#define NFP_RESOURCE_TBL_SIZE 4096 +#define NFP_RESOURCE_TBL_ENTRIES (NFP_RESOURCE_TBL_SIZE / \ + sizeof(struct nfp_resource_entry)) + +struct nfp_resource { + char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1]; + u32 cpp_id; + u64 addr; + u64 size; + struct nfp_cpp_mutex *mutex; +}; + +static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) +{ + char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {}; + struct nfp_resource_entry entry; + u32 cpp_id, key; + int ret, i; + + cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ + + strncpy(name_pad, res->name, sizeof(name_pad)); + + /* Search for a matching entry */ + key = NFP_RESOURCE_TBL_KEY; + if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) + key = crc32_posix(name_pad, sizeof(name_pad)); + + for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + u64 addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry)); + if (ret != sizeof(entry)) + return -EIO; + + if (entry.mutex.key != key) + continue; + + /* Found key! 
*/ + res->mutex = + nfp_cpp_mutex_alloc(cpp, + NFP_RESOURCE_TBL_TARGET, addr, key); + res->cpp_id = NFP_CPP_ID(entry.region.cpp_target, + entry.region.cpp_action, + entry.region.cpp_token); + res->addr = (u64)entry.region.page_offset << 8; + res->size = (u64)entry.region.page_size << 8; + + return 0; + } + + return -ENOENT; +} + +static int +nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res, + struct nfp_cpp_mutex *dev_mutex) +{ + int err; + + if (nfp_cpp_mutex_lock(dev_mutex)) + return -EINVAL; + + err = nfp_cpp_resource_find(cpp, res); + if (err) + goto err_unlock_dev; + + err = nfp_cpp_mutex_trylock(res->mutex); + if (err) + goto err_res_mutex_free; + + nfp_cpp_mutex_unlock(dev_mutex); + + return 0; + +err_res_mutex_free: + nfp_cpp_mutex_free(res->mutex); +err_unlock_dev: + nfp_cpp_mutex_unlock(dev_mutex); + + return err; +} + +/** + * nfp_resource_acquire() - Acquire a resource handle + * @cpp: NFP CPP handle + * @name: Name of the resource + * + * NOTE: This function locks the acquired resource + * + * Return: NFP Resource handle, or ERR_PTR() + */ +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) +{ + unsigned long warn_at = jiffies + 15 * HZ; + struct nfp_cpp_mutex *dev_mutex; + struct nfp_resource *res; + int err; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return ERR_PTR(-ENOMEM); + + strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); + + dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) { + kfree(res); + return ERR_PTR(-ENOMEM); + } + + for (;;) { + err = nfp_resource_try_acquire(cpp, res, dev_mutex); + if (!err) + break; + if (err != -EBUSY) + goto err_free; + + err = msleep_interruptible(1); + if (err != 0) { + err = -ERESTARTSYS; + goto err_free; + } + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + 60 * HZ; + nfp_warn(cpp, "Warning: waiting for NFP resource %s\n", + name); + } + } + + nfp_cpp_mutex_free(dev_mutex); + + return res; + +err_free: + nfp_cpp_mutex_free(dev_mutex); + kfree(res); + return ERR_PTR(err); +} + +/** + * nfp_resource_release() - Release a NFP Resource handle + * @res: NFP Resource handle + * + * NOTE: This function implictly unlocks the resource handle + */ +void nfp_resource_release(struct nfp_resource *res) +{ + nfp_cpp_mutex_unlock(res->mutex); + nfp_cpp_mutex_free(res->mutex); + kfree(res); +} + +/** + * nfp_resource_cpp_id() - Return the cpp_id of a resource handle + * @res: NFP Resource handle + * + * Return: NFP CPP ID + */ +u32 nfp_resource_cpp_id(struct nfp_resource *res) +{ + return res->cpp_id; +} + +/** + * nfp_resource_name() - Return the name of a resource handle + * @res: NFP Resource handle + * + * Return: const char pointer to the name of the resource + */ +const char *nfp_resource_name(struct nfp_resource *res) +{ + return res->name; +} + +/** + * nfp_resource_address() - Return the address of a resource handle + * @res: NFP Resource handle + * + * Return: Address of the resource + */ +u64 nfp_resource_address(struct nfp_resource *res) +{ + return res->addr; +} + +/** + * nfp_resource_size() - Return the size in bytes of a resource handle + * @res: NFP Resource handle + * + * Return: Size of the resource in bytes + */ +u64 nfp_resource_size(struct nfp_resource *res) +{ + return res->size; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c new file mode 100644 index 000000000000..0e3870ecfb8c 
--- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -0,0 +1,306 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_rtsym.c + * Interface for accessing run-time symbol table + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Espen Skoglund <espen.skoglund@netronome.com> + * Francois H. Theron <francois.theron@netronome.com> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/io-64-nonatomic-hi-lo.h> + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp6000/nfp6000.h" + +/* These need to match the linker */ +#define SYM_TGT_LMEM 0 +#define SYM_TGT_EMU_CACHE 0x17 + +struct nfp_rtsym_entry { + u8 type; + u8 target; + u8 island; + u8 addr_hi; + __le32 addr_lo; + __le16 name; + u8 menum; + u8 size_hi; + __le32 size_lo; +}; + +struct nfp_rtsym_cache { + int num; + char *strtab; + struct nfp_rtsym symtab[]; +}; + +static int nfp_meid(u8 island_id, u8 menum) +{ + return (island_id & 0x3F) == island_id && menum < 12 ? 
+ (island_id << 4) | (menum + 4) : -1; +} + +static void +nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size, + struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) +{ + sw->type = fw->type; + sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size; + sw->addr = ((u64)fw->addr_hi << 32) | le32_to_cpu(fw->addr_lo); + sw->size = ((u64)fw->size_hi << 32) | le32_to_cpu(fw->size_lo); + + switch (fw->target) { + case SYM_TGT_LMEM: + sw->target = NFP_RTSYM_TARGET_LMEM; + break; + case SYM_TGT_EMU_CACHE: + sw->target = NFP_RTSYM_TARGET_EMU_CACHE; + break; + default: + sw->target = fw->target; + break; + } + + if (fw->menum != 0xff) + sw->domain = nfp_meid(fw->island, fw->menum); + else if (fw->island != 0xff) + sw->domain = fw->island; + else + sw->domain = -1; +} + +static int nfp_rtsymtab_probe(struct nfp_cpp *cpp) +{ + const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) | + NFP_ISL_EMEM0; + u32 strtab_addr, symtab_addr, strtab_size, symtab_size; + struct nfp_rtsym_entry *rtsymtab; + struct nfp_rtsym_cache *cache; + const struct nfp_mip *mip; + int err, n, size; + + mip = nfp_mip_open(cpp); + if (!mip) + return -EIO; + + nfp_mip_strtab(mip, &strtab_addr, &strtab_size); + nfp_mip_symtab(mip, &symtab_addr, &symtab_size); + nfp_mip_close(mip); + + if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab)) + return -ENXIO; + + /* Align to 64 bits */ + symtab_size = round_up(symtab_size, 8); + strtab_size = round_up(strtab_size, 8); + + rtsymtab = kmalloc(symtab_size, GFP_KERNEL); + if (!rtsymtab) + return -ENOMEM; + + size = sizeof(*cache); + size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym); + size += strtab_size + 1; + cache = kmalloc(size, GFP_KERNEL); + if (!cache) { + err = -ENOMEM; + goto err_free_rtsym_raw; + } + + cache->num = symtab_size / sizeof(*rtsymtab); + cache->strtab = (void *)&cache->symtab[cache->num]; + + err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size); + if (err != symtab_size) + goto err_free_cache; + + err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size); + if (err != strtab_size) + goto err_free_cache; + cache->strtab[strtab_size] = '\0'; + + for (n = 0; n < cache->num; n++) + nfp_rtsym_sw_entry_init(cache, strtab_size, + &cache->symtab[n], &rtsymtab[n]); + + kfree(rtsymtab); + nfp_rtsym_cache_set(cpp, cache); + return 0; + +err_free_cache: + kfree(cache); +err_free_rtsym_raw: + kfree(rtsymtab); + return err; +} + +static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_cache *cache; + int err; + + cache = nfp_rtsym_cache(cpp); + if (cache) + return cache; + + err = nfp_rtsymtab_probe(cpp); + if (err < 0) + return ERR_PTR(err); + + return nfp_rtsym_cache(cpp); +} + +/** + * nfp_rtsym_count() - Get the number of RTSYM descriptors + * @cpp: NFP CPP handle + * + * Return: Number of RTSYM descriptors, or -ERRNO + */ +int nfp_rtsym_count(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_cache *cache; + + cache = nfp_rtsym(cpp); + if (IS_ERR(cache)) + return PTR_ERR(cache); + + return cache->num; +} + +/** + * nfp_rtsym_get() - Get the Nth RTSYM descriptor + * @cpp: NFP CPP handle + * @idx: Index (0-based) of the RTSYM descriptor + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx) +{ + struct nfp_rtsym_cache *cache; + + cache = nfp_rtsym(cpp); + if (IS_ERR(cache)) + return NULL; + + if (idx >= cache->num) + return NULL; + + return &cache->symtab[idx]; +} + +/** + * 
nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name + * @cpp: NFP CPP handle + * @name: Symbol name + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name) +{ + struct nfp_rtsym_cache *cache; + int n; + + cache = nfp_rtsym(cpp); + if (IS_ERR(cache)) + return NULL; + + for (n = 0; n < cache->num; n++) { + if (strcmp(name, cache->symtab[n].name) == 0) + return &cache->symtab[n]; + } + + return NULL; +} + +/** + * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol + * @cpp: NFP CPP handle + * @name: Symbol name + * @error: Poniter to error code (optional) + * + * Lookup a symbol, map, read it and return it's value. Value of the symbol + * will be interpreted as a simple little-endian unsigned value. Symbol can + * be 4 or 8 bytes in size. + * + * Return: value read, on error sets the error and returns ~0ULL. + */ +u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error) +{ + const struct nfp_rtsym *sym; + u32 val32, id; + u64 val; + int err; + + sym = nfp_rtsym_lookup(cpp, name); + if (!sym) { + err = -ENOENT; + goto exit; + } + + id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); + + switch (sym->size) { + case 4: + err = nfp_cpp_readl(cpp, id, sym->addr, &val32); + val = val32; + break; + case 8: + err = nfp_cpp_readq(cpp, id, sym->addr, &val); + break; + default: + nfp_err(cpp, + "rtsym '%s' unsupported or non-scalar size: %lld\n", + name, sym->size); + err = -EINVAL; + break; + } + + if (err == sym->size) + err = 0; + else if (err >= 0) + err = -EIO; +exit: + if (error) + *error = err; + + if (err) + return ~0ULL; + return val; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c new file mode 100644 index 000000000000..4ea1e585d945 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c @@ -0,0 +1,764 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_target.c + * CPP Access Width Decoder + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Francois H. Theron <francois.theron@netronome.com> + */ + +#include <linux/bitops.h> + +#include "nfp_cpp.h" + +#include "nfp6000/nfp6000.h" + +#define P32 1 +#define P64 2 + +/* This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. + */ + +#define AT(_action, _token, _pull, _push) \ + case NFP_CPP_ID(0, (_action), (_token)): \ + return PUSHPULL((_pull), (_push)) + +static int target_rw(u32 cpp_id, int pp, int start, int len) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, pp); + AT(1, 0, pp, 0); + AT(NFP_CPP_ACTION_RW, 0, pp, pp); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_dma(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiDma */ + AT(1, 0, P64, 0); /* WriteNbiDma */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_stats(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P32); /* ReadNbiStats */ + AT(1, 0, P32, 0); /* WriteNbiStats */ + AT(NFP_CPP_ACTION_RW, 0, P32, P32); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_tm(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiTM */ + AT(1, 0, P64, 0); /* WriteNbiTM */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_ppc(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiPreclassifier */ + AT(1, 0, P64, 0); /* WriteNbiPreclassifier */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi(u32 cpp_id, u64 address) +{ + u64 rel_addr = address & 0x3fFFFF; + + if (rel_addr < (1 << 20)) + return nfp6000_nbi_dma(cpp_id); + if (rel_addr < (2 << 20)) + return nfp6000_nbi_stats(cpp_id); + if (rel_addr < (3 << 20)) + return nfp6000_nbi_tm(cpp_id); + return nfp6000_nbi_ppc(cpp_id); +} + +/* This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. 
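+ *
+ * Editorial note, not part of this patch: each AT(action, token, pull, push)
+ * entry below matches one CPP (action, token) pair and records whether, and
+ * at what width (P32 for 32-bit, P64 for 64-bit words), the transaction
+ * pulls data from and/or pushes data to the initiator; 0 means no data moves
+ * in that direction. For example, AT(0, 0, 0, P64) describes the plain
+ * 64-bit read_be: nothing is pulled, a 64-bit push carries the data back.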
+ */ +static int nfp6000_mu_common(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(NFP_CPP_ACTION_RW, 0, P64, P64); /* read_be/write_be */ + AT(NFP_CPP_ACTION_RW, 1, P64, P64); /* read_le/write_le */ + AT(NFP_CPP_ACTION_RW, 2, P64, P64); /* read_swap_be/write_swap_be */ + AT(NFP_CPP_ACTION_RW, 3, P64, P64); /* read_swap_le/write_swap_le */ + AT(0, 0, 0, P64); /* read_be */ + AT(0, 1, 0, P64); /* read_le */ + AT(0, 2, 0, P64); /* read_swap_be */ + AT(0, 3, 0, P64); /* read_swap_le */ + AT(1, 0, P64, 0); /* write_be */ + AT(1, 1, P64, 0); /* write_le */ + AT(1, 2, P64, 0); /* write_swap_be */ + AT(1, 3, P64, 0); /* write_swap_le */ + AT(3, 0, 0, P32); /* atomic_read */ + AT(3, 2, P32, 0); /* mask_compare_write */ + AT(4, 0, P32, 0); /* atomic_write */ + AT(4, 2, 0, 0); /* atomic_write_imm */ + AT(4, 3, 0, P32); /* swap_imm */ + AT(5, 0, P32, 0); /* set */ + AT(5, 3, 0, P32); /* test_set_imm */ + AT(6, 0, P32, 0); /* clr */ + AT(6, 3, 0, P32); /* test_clr_imm */ + AT(7, 0, P32, 0); /* add */ + AT(7, 3, 0, P32); /* test_add_imm */ + AT(8, 0, P32, 0); /* addsat */ + AT(8, 3, 0, P32); /* test_subsat_imm */ + AT(9, 0, P32, 0); /* sub */ + AT(9, 3, 0, P32); /* test_sub_imm */ + AT(10, 0, P32, 0); /* subsat */ + AT(10, 3, 0, P32); /* test_subsat_imm */ + AT(13, 0, 0, P32); /* microq128_get */ + AT(13, 1, 0, P32); /* microq128_pop */ + AT(13, 2, P32, 0); /* microq128_put */ + AT(15, 0, P32, 0); /* xor */ + AT(15, 3, 0, P32); /* test_xor_imm */ + AT(28, 0, 0, P32); /* read32_be */ + AT(28, 1, 0, P32); /* read32_le */ + AT(28, 2, 0, P32); /* read32_swap_be */ + AT(28, 3, 0, P32); /* read32_swap_le */ + AT(31, 0, P32, 0); /* write32_be */ + AT(31, 1, P32, 0); /* write32_le */ + AT(31, 2, P32, 0); /* write32_swap_be */ + AT(31, 3, P32, 0); /* write32_swap_le */ + default: + return -EINVAL; + } +} + +static int nfp6000_mu_ctm(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(16, 1, 0, P32); /* packet_read_packet_status */ + AT(17, 1, 0, P32); /* packet_credit_get */ + AT(17, 3, 0, P64); /* packet_add_thread */ + AT(18, 2, 0, P64); /* packet_free_and_return_pointer */ + AT(18, 3, 0, P64); /* packet_return_pointer */ + AT(21, 0, 0, P64); /* pe_dma_to_memory_indirect */ + AT(21, 1, 0, P64); /* pe_dma_to_memory_indirect_swap */ + AT(21, 2, 0, P64); /* pe_dma_to_memory_indirect_free */ + AT(21, 3, 0, P64); /* pe_dma_to_memory_indirect_free_swap */ + default: + return nfp6000_mu_common(cpp_id); + } +} + +static int nfp6000_mu_emu(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(18, 0, 0, P32); /* read_queue */ + AT(18, 1, 0, P32); /* read_queue_ring */ + AT(18, 2, P32, 0); /* write_queue */ + AT(18, 3, P32, 0); /* write_queue_ring */ + AT(20, 2, P32, 0); /* journal */ + AT(21, 0, 0, P32); /* get */ + AT(21, 1, 0, P32); /* get_eop */ + AT(21, 2, 0, P32); /* get_freely */ + AT(22, 0, 0, P32); /* pop */ + AT(22, 1, 0, P32); /* pop_eop */ + AT(22, 2, 0, P32); /* pop_freely */ + default: + return nfp6000_mu_common(cpp_id); + } +} + +static int nfp6000_mu_imu(u32 cpp_id) +{ + return nfp6000_mu_common(cpp_id); +} + +static int nfp6000_mu(u32 cpp_id, u64 address) +{ + int pp; + + if (address < 0x2000000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x8000000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0x9800000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x9C00000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0xA000000000ULL) + pp = nfp6000_mu_imu(cpp_id); + else + pp = nfp6000_mu_ctm(cpp_id); + + return pp; +} + 
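The tables above only say which push/pull bus widths a given target/action/token pair supports; a transfer routine still has to turn that encoding into an access size before issuing 32-bit or 64-bit word operations. A minimal sketch of such a caller follows. It is illustrative only and not part of the patch: it calls nfp_target_pushpull(), defined further down in this file, and the nibble packing of PUSHPULL() (pull width in the high nibble, push width in the low nibble, with P32/P64 mapping to 4/8-byte words) is an assumption made for this sketch; example_access_width() is a hypothetical helper, not an nfpcore API.

/* Hypothetical caller of nfp_target_pushpull(); the nibble layout of the
 * returned value is an assumption made for this sketch, not nfpcore code.
 */
static int example_access_width(u32 cpp_id, u64 addr, bool is_read)
{
	int pp = nfp_target_pushpull(cpp_id, addr);
	int width_code;

	if (pp < 0)			/* action/token not in the tables above */
		return pp;

	/* Reads are served by the push bus, writes by the pull bus. */
	width_code = is_read ? (pp & 0xf) : ((pp >> 4) & 0xf);
	if (!width_code)
		return -EINVAL;		/* no data movement in this direction */

	return 2 << width_code;		/* P32 (1) -> 4 bytes, P64 (2) -> 8 bytes */
}

A zero width in either nibble is how the tables mark one-directional actions: plain reads (action 0) carry only a push width and plain writes (action 1) only a pull width, so a caller can reject a transfer in the wrong direction before touching the hardware.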
+static int nfp6000_ila(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 1, 0, P32); /* read_check_error */ + AT(2, 0, 0, P32); /* read_int */ + AT(3, 0, P32, 0); /* write_int */ + default: + return target_rw(cpp_id, P32, 48, 4); + } +} + +static int nfp6000_pci(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(2, 0, 0, P32); + AT(3, 0, P32, 0); + default: + return target_rw(cpp_id, P32, 4, 4); + } +} + +static int nfp6000_crypto(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(2, 0, P64, 0); + default: + return target_rw(cpp_id, P64, 12, 4); + } +} + +static int nfp6000_cap_xpb(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 1, 0, P32); /* RingGet */ + AT(0, 2, P32, 0); /* Interthread Signal */ + AT(1, 1, P32, 0); /* RingPut */ + AT(1, 2, P32, 0); /* CTNNWr */ + AT(2, 0, 0, P32); /* ReflectRd, signal none */ + AT(2, 1, 0, P32); /* ReflectRd, signal self */ + AT(2, 2, 0, P32); /* ReflectRd, signal remote */ + AT(2, 3, 0, P32); /* ReflectRd, signal both */ + AT(3, 0, P32, 0); /* ReflectWr, signal none */ + AT(3, 1, P32, 0); /* ReflectWr, signal self */ + AT(3, 2, P32, 0); /* ReflectWr, signal remote */ + AT(3, 3, P32, 0); /* ReflectWr, signal both */ + AT(NFP_CPP_ACTION_RW, 1, P32, P32); + default: + return target_rw(cpp_id, P32, 1, 63); + } +} + +static int nfp6000_cls(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 3, P32, 0); /* xor */ + AT(2, 0, P32, 0); /* set */ + AT(2, 1, P32, 0); /* clr */ + AT(4, 0, P32, 0); /* add */ + AT(4, 1, P32, 0); /* add64 */ + AT(6, 0, P32, 0); /* sub */ + AT(6, 1, P32, 0); /* sub64 */ + AT(6, 2, P32, 0); /* subsat */ + AT(8, 2, P32, 0); /* hash_mask */ + AT(8, 3, P32, 0); /* hash_clear */ + AT(9, 0, 0, P32); /* ring_get */ + AT(9, 1, 0, P32); /* ring_pop */ + AT(9, 2, 0, P32); /* ring_get_freely */ + AT(9, 3, 0, P32); /* ring_pop_freely */ + AT(10, 0, P32, 0); /* ring_put */ + AT(10, 2, P32, 0); /* ring_journal */ + AT(14, 0, P32, 0); /* reflect_write_sig_local */ + AT(15, 1, 0, P32); /* reflect_read_sig_local */ + AT(17, 2, P32, 0); /* statisic */ + AT(24, 0, 0, P32); /* ring_read */ + AT(24, 1, P32, 0); /* ring_write */ + AT(25, 0, 0, P32); /* ring_workq_add_thread */ + AT(25, 1, P32, 0); /* ring_workq_add_work */ + default: + return target_rw(cpp_id, P32, 0, 64); + } +} + +int nfp_target_pushpull(u32 cpp_id, u64 address) +{ + switch (NFP_CPP_ID_TARGET_of(cpp_id)) { + case NFP_CPP_TARGET_NBI: + return nfp6000_nbi(cpp_id, address); + case NFP_CPP_TARGET_QDR: + return target_rw(cpp_id, P32, 24, 4); + case NFP_CPP_TARGET_ILA: + return nfp6000_ila(cpp_id); + case NFP_CPP_TARGET_MU: + return nfp6000_mu(cpp_id, address); + case NFP_CPP_TARGET_PCIE: + return nfp6000_pci(cpp_id); + case NFP_CPP_TARGET_ARM: + if (address < 0x10000) + return target_rw(cpp_id, P64, 1, 1); + else + return target_rw(cpp_id, P32, 1, 1); + case NFP_CPP_TARGET_CRYPTO: + return nfp6000_crypto(cpp_id); + case NFP_CPP_TARGET_CT_XPB: + return nfp6000_cap_xpb(cpp_id); + case NFP_CPP_TARGET_CLS: + return nfp6000_cls(cpp_id); + case 0: + return target_rw(cpp_id, P32, 4, 4); + default: + return -EINVAL; + } +} + +#undef AT +#undef P32 +#undef P64 + +/* All magic NFP-6xxx IMB 'mode' numbers here are from: + * Databook (1 August 2013) + * - System Overview and Connectivity + * -- Internal Connectivity + * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus + * ---- CPP addressing + * ----- Table 3.6. 
CPP Address Translation Mode Commands + */ + +#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2 + +static int nfp_decode_basic(u64 addr, int *dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + + /* This function doesn't handle MU or CTXBP */ + if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB) + return -EINVAL; + + switch (mode) { + case 0: + /* For VQDR, in this mode for 32-bit addressing + * it would be islands 0, 16, 32 and 48 depending on channel + * and upper address bits. + * Since those are not all valid islands, most decode + * cases would result in bad island IDs, but we do them + * anyway since this is decoding an address that is already + * assumed to be used as-is to get to sram. + */ + iid_lsb = addr40 ? 34 : 26; + *dest_island = (addr >> iid_lsb) & 0x3F; + return 0; + case 1: + /* For VQDR 32-bit, this would decode as: + * Channel 0: island#0 + * Channel 1: island#0 + * Channel 2: island#1 + * Channel 3: island#1 + * That would be valid as long as both islands + * have VQDR. Let's allow this. + */ + idx_lsb = addr40 ? 39 : 31; + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + /* For VQDR 32-bit: + * Channel 0: (island#0 | 0) + * Channel 1: (island#0 | 1) + * Channel 2: (island#1 | 0) + * Channel 3: (island#1 | 1) + * + * Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 1; + + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + /* In this mode the data address starts to affect the island ID + * so rather not allow it. In some really specific case + * one could use this to send the upper half of the + * VQDR channel to another MU, but this is getting very + * specific. + * However, as above for mode 0, this is the decoder + * and the caller should validate the resulting IID. + * This blindly does what the silicon would do. + */ + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 2; + + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + return -EINVAL; + } +} + +static int nfp_encode_basic_qdr(u64 addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int v, ret; + + /* Full Island ID and channel bits overlap? */ + ret = nfp_decode_basic(addr, &v, cpp_tgt, mode, addr40, isld1, isld0); + if (ret) + return ret; + + /* The current address won't go where expected? */ + if (dest_island != -1 && dest_island != v) + return -EINVAL; + + /* If dest_island was -1, we don't care where it goes. */ + return 0; +} + +/* Try each option, take first one that fits. + * Not sure if we would want to do some smarter + * searching and prefer 0 or non-0 island IDs. 
+ */ +static int nfp_encode_basic_search(u64 *addr, int dest_island, int *isld, + int iid_lsb, int idx_lsb, int v_max) +{ + int i, v; + + for (i = 0; i < 2; i++) + for (v = 0; v < v_max; v++) { + if (dest_island != (isld[i] | v)) + continue; + + *addr &= ~GENMASK_ULL(idx_lsb, iid_lsb); + *addr |= ((u64)i << idx_lsb); + *addr |= ((u64)v << iid_lsb); + return 0; + } + + return -ENODEV; +} + +/* For VQDR, we may not modify the Channel bits, which might overlap + * with the Index bit. When it does, we need to ensure that isld0 == isld1. + */ +static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + int isld[2]; + u64 v64; + + isld[0] = isld0; + isld[1] = isld1; + + /* This function doesn't handle MU or CTXBP */ + if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB) + return -EINVAL; + + switch (mode) { + case 0: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* In this specific mode we'd rather not modify + * the address but we can verify if the existing + * contents will point to a valid island. + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + iid_lsb = addr40 ? 34 : 26; + /* <39:34> or <31:26> */ + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= ((u64)dest_island << iid_lsb) & v64; + return 0; + case 1: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + idx_lsb = addr40 ? 39 : 31; + if (dest_island == isld0) { + /* Only need to clear the Index bit */ + *addr &= ~BIT_ULL(idx_lsb); + return 0; + } + + if (dest_island == isld1) { + /* Only need to set the Index bit */ + *addr |= BIT_ULL(idx_lsb); + return 0; + } + + return -ENODEV; + case 2: + /* iid<0> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* Special case where we allow channel bits to + * be set before hand and with them select an island. + * So we need to confirm that it's at least plausible. + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + /* Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 1; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 2); + case 3: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* iid<0> = addr<29> = data + * iid<1> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 2; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 4); + default: + return -EINVAL; + } +} + +static int nfp_encode_mu(u64 *addr, int dest_island, int mode, + bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb, locality_lsb; + int isld[2]; + u64 v64; + int da; + + isld[0] = isld0; + isld[1] = isld1; + locality_lsb = nfp_cppat_mu_locality_lsb(mode, addr40); + + if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = addr40 ? 
32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + case 1: + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + idx_lsb = addr40 ? 37 : 29; + if (dest_island == isld0) { + *addr &= ~BIT_ULL(idx_lsb); + return 0; + } + + if (dest_island == isld1) { + *addr |= BIT_ULL(idx_lsb); + return 0; + } + + return -ENODEV; + case 2: + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + /* Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = addr40 ? 37 : 29; + iid_lsb = idx_lsb - 1; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 2); + case 3: + /* Only the EMU will use 40 bit addressing. Silently + * set the direct locality bit for everyone else. + * The SDK toolchain uses dest_island <= 0 to test + * for atypical address encodings to support access + * to local-island CTM with a 32-but address (high-locality + * is effewctively ignored and just used for + * routing to island #0). + */ + if (dest_island > 0 && (dest_island < 24 || dest_island > 26)) { + *addr |= ((u64)_NIC_NFP6000_MU_LOCALITY_DIRECT) + << locality_lsb; + da = 1; + } + + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = addr40 ? 37 : 29; + iid_lsb = idx_lsb - 2; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 4); + default: + return -EINVAL; + } +} + +static int nfp_cppat_addr_encode(u64 *addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP_CPP_TARGET_NBI: + case NFP_CPP_TARGET_QDR: + case NFP_CPP_TARGET_ILA: + case NFP_CPP_TARGET_PCIE: + case NFP_CPP_TARGET_ARM: + case NFP_CPP_TARGET_CRYPTO: + case NFP_CPP_TARGET_CLS: + return nfp_encode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP_CPP_TARGET_MU: + return nfp_encode_mu(addr, dest_island, mode, + addr40, isld1, isld0); + + case NFP_CPP_TARGET_CT_XPB: + if (mode != 1 || addr40) + return -EINVAL; + *addr &= ~GENMASK_ULL(29, 24); + *addr |= ((u64)dest_island << 24) & GENMASK_ULL(29, 24); + return 0; + default: + return -EINVAL; + } +} + +int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, + u32 *cpp_target_id, u64 *cpp_target_address, + const u32 *imb_table) +{ + const int island = NFP_CPP_ID_ISLAND_of(cpp_island_id); + const int target = NFP_CPP_ID_TARGET_of(cpp_island_id); + u32 imb; + int err; + + if (target < 0 || target >= 16) + return -EINVAL; + + if (island == 0) { + /* Already translated */ + *cpp_target_id = cpp_island_id; + *cpp_target_address = cpp_island_address; + return 0; + } + + /* CPP + Island only allowed on systems with IMB tables */ + if (!imb_table) + return -EINVAL; + + imb = imb_table[target]; + + *cpp_target_address = cpp_island_address; + err = nfp_cppat_addr_encode(cpp_target_address, island, target, + ((imb >> 13) & 7), ((imb >> 12) & 1), + ((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f)); + if (err) + return err; + + *cpp_target_id = NFP_CPP_ID(target, + NFP_CPP_ID_ACTION_of(cpp_island_id), + 
NFP_CPP_ID_TOKEN_of(cpp_island_id)); + + return 0; +} diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 119f6dca71f0..9709c8ca0774 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -874,16 +874,18 @@ static void w90p910_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } -static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int w90p910_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct w90p910_ether *ether = netdev_priv(dev); - return mii_ethtool_gset(ðer->mii, cmd); + return mii_ethtool_get_link_ksettings(ðer->mii, cmd); } -static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int w90p910_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct w90p910_ether *ether = netdev_priv(dev); - return mii_ethtool_sset(ðer->mii, cmd); + return mii_ethtool_set_link_ksettings(ðer->mii, cmd); } static int w90p910_nway_reset(struct net_device *dev) @@ -899,11 +901,11 @@ static u32 w90p910_get_link(struct net_device *dev) } static const struct ethtool_ops w90p910_ether_ethtool_ops = { - .get_settings = w90p910_get_settings, - .set_settings = w90p910_set_settings, .get_drvinfo = w90p910_get_drvinfo, .nway_reset = w90p910_nway_reset, .get_link = w90p910_get_link, + .get_link_ksettings = w90p910_get_link_ksettings, + .set_link_ksettings = w90p910_set_link_ksettings, }; static const struct net_device_ops w90p910_ether_netdev_ops = { diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 3913f07279d2..978d32944c80 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1733,7 +1733,7 @@ static void nv_update_stats(struct net_device *dev) * Called with read_lock(&dev_base_lock) held for read - * only synchronized against unregister_netdevice. */ -static struct rtnl_link_stats64* +static void nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) __acquires(&netdev_priv(dev)->hwstats_lock) __releases(&netdev_priv(dev)->hwstats_lock) @@ -1793,8 +1793,6 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) spin_unlock_bh(&np->hwstats_lock); } - - return storage; } /* @@ -3274,8 +3272,6 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) pci_push(base); writel(np->linkspeed, base + NvRegLinkSpeed); pci_push(base); - - return; } /** @@ -3751,7 +3747,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) if (rx_work < budget) { /* re-enable interrupts (msix not enabled in napi) */ - napi_complete(napi); + napi_complete_done(napi, rx_work); writel(np->irqmask, base + NvRegIrqMask); } @@ -4239,14 +4235,15 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) return 0; } -static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int nv_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct fe_priv *np = netdev_priv(dev); - u32 speed; + u32 speed, supported, advertising; int adv; spin_lock_irq(&np->lock); - ecmd->port = PORT_MII; + cmd->base.port = PORT_MII; if (!netif_running(dev)) { /* We do not track link speed / duplex setting if the * interface is disabled. 
Force a link check */ @@ -4274,64 +4271,71 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) speed = -1; break; } - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; if (np->duplex) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; } else { speed = SPEED_UNKNOWN; - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_cmd_speed_set(ecmd, speed); - ecmd->autoneg = np->autoneg; + cmd->base.speed = speed; + cmd->base.autoneg = np->autoneg; - ecmd->advertising = ADVERTISED_MII; + advertising = ADVERTISED_MII; if (np->autoneg) { - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); if (adv & ADVERTISE_10HALF) - ecmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if (adv & ADVERTISE_10FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (adv & ADVERTISE_100HALF) - ecmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if (adv & ADVERTISE_100FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; if (np->gigabit == PHY_GIGABIT) { adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); if (adv & ADVERTISE_1000FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; } } - ecmd->supported = (SUPPORTED_Autoneg | + supported = (SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_MII); if (np->gigabit == PHY_GIGABIT) - ecmd->supported |= SUPPORTED_1000baseT_Full; + supported |= SUPPORTED_1000baseT_Full; - ecmd->phy_address = np->phyaddr; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.phy_address = np->phyaddr; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); /* ignore maxtxpkt, maxrxpkt for now */ spin_unlock_irq(&np->lock); return 0; } -static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int nv_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct fe_priv *np = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; + u32 advertising; - if (ecmd->port != PORT_MII) - return -EINVAL; - if (ecmd->transceiver != XCVR_EXTERNAL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.port != PORT_MII) return -EINVAL; - if (ecmd->phy_address != np->phyaddr) { + if (cmd->base.phy_address != np->phyaddr) { /* TODO: support switching between multiple phys. Should be * trivial, but not enabled due to lack of test hardware. */ return -EINVAL; } - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 mask; mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | @@ -4339,16 +4343,17 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (np->gigabit == PHY_GIGABIT) mask |= ADVERTISED_1000baseT_Full; - if ((ecmd->advertising & mask) == 0) + if ((advertising & mask) == 0) return -EINVAL; - } else if (ecmd->autoneg == AUTONEG_DISABLE) { + } else if (cmd->base.autoneg == AUTONEG_DISABLE) { /* Note: autonegotiation disable, speed 1000 intentionally * forbidden - no one should need that. 
*/ if (speed != SPEED_10 && speed != SPEED_100) return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + if (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL) return -EINVAL; } else { return -EINVAL; @@ -4378,7 +4383,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) netif_tx_unlock_bh(dev); } - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { int adv, bmcr; np->autoneg = 1; @@ -4386,13 +4391,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) /* advertise only what has been requested */ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - if (ecmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; @@ -4403,7 +4408,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (np->gigabit == PHY_GIGABIT) { adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); adv &= ~ADVERTISE_1000FULL; - if (ecmd->advertising & ADVERTISED_1000baseT_Full) + if (advertising & ADVERTISED_1000baseT_Full) adv |= ADVERTISE_1000FULL; mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); } @@ -4430,13 +4435,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) + if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF) adv |= ADVERTISE_10HALF; - if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) + if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL) adv |= ADVERTISE_10FULL; - if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) + if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF) adv |= ADVERTISE_100HALF; - if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) + if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL) adv |= ADVERTISE_100FULL; np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ @@ -5243,8 +5248,6 @@ static const struct ethtool_ops ops = { .get_link = ethtool_op_get_link, .get_wol = nv_get_wol, .set_wol = nv_set_wol, - .get_settings = nv_get_settings, - .set_settings = nv_set_settings, .get_regs_len = nv_get_regs_len, .get_regs = nv_get_regs, .nway_reset = nv_nway_reset, @@ -5257,6 +5260,8 @@ static const struct ethtool_ops ops = { .get_sset_count = nv_get_sset_count, .self_test = nv_self_test, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = nv_get_link_ksettings, + .set_link_ksettings = nv_set_link_ksettings, }; /* The mgmt unit and driver use a semaphore to access the phy during init */ diff --git a/drivers/net/ethernet/nxp/lpc_eth.c 
b/drivers/net/ethernet/nxp/lpc_eth.c index dd6b0d0f7fa5..9c7ffd649e9a 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_struct *napi, int budget) rx_done = __lpc_handle_recv(ndev, budget); if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); lpc_eth_enable_int(pldat->net_base); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index b19be7c6c1f4..21093276d2b7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -73,62 +73,80 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = { #define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4) #define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN) /** - * pch_gbe_get_settings - Get device-specific settings + * pch_gbe_get_link_ksettings - Get device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command * Returns: * 0: Successful. * Negative value: Failed. */ -static int pch_gbe_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int pch_gbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); + u32 supported, advertising; int ret; - ret = mii_ethtool_gset(&adapter->mii, ecmd); - ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half); - ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); + ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd); + + ethtool_convert_link_mode_to_legacy_u32(&supported, + ecmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + ecmd->link_modes.advertising); + + supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half); + advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); if (!netif_carrier_ok(adapter->netdev)) - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->base.speed = SPEED_UNKNOWN; return ret; } /** - * pch_gbe_set_settings - Set device-specific settings + * pch_gbe_set_link_ksettings - Set device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command * Returns: * 0: Successful. * Negative value: Failed. 
*/ -static int pch_gbe_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int pch_gbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ecmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; - u32 speed = ethtool_cmd_speed(ecmd); + struct ethtool_link_ksettings copy_ecmd; + u32 speed = ecmd->base.speed; + u32 advertising; int ret; pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + memcpy(©_ecmd, ecmd, sizeof(*ecmd)); + /* when set_settings() is called with a ethtool_cmd previously * filled by get_settings() on a down link, speed is -1: */ if (speed == UINT_MAX) { speed = SPEED_1000; - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = DUPLEX_FULL; + copy_ecmd.base.speed = speed; + copy_ecmd.base.duplex = DUPLEX_FULL; } - ret = mii_ethtool_sset(&adapter->mii, ecmd); + ret = mii_ethtool_set_link_ksettings(&adapter->mii, ©_ecmd); if (ret) { - netdev_err(netdev, "Error: mii_ethtool_sset\n"); + netdev_err(netdev, "Error: mii_ethtool_set_link_ksettings\n"); return ret; } hw->mac.link_speed = speed; - hw->mac.link_duplex = ecmd->duplex; - hw->phy.autoneg_advertised = ecmd->advertising; - hw->mac.autoneg = ecmd->autoneg; + hw->mac.link_duplex = copy_ecmd.base.duplex; + ethtool_convert_link_mode_to_legacy_u32( + &advertising, copy_ecmd.link_modes.advertising); + hw->phy.autoneg_advertised = advertising; + hw->mac.autoneg = copy_ecmd.base.autoneg; /* reset the link */ if (netif_running(adapter->netdev)) { @@ -487,8 +505,6 @@ static int pch_gbe_get_sset_count(struct net_device *netdev, int sset) } static const struct ethtool_ops pch_gbe_ethtool_ops = { - .get_settings = pch_gbe_get_settings, - .set_settings = pch_gbe_set_settings, .get_drvinfo = pch_gbe_get_drvinfo, .get_regs_len = pch_gbe_get_regs_len, .get_regs = pch_gbe_get_regs, @@ -503,6 +519,8 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = { .get_strings = pch_gbe_get_strings, .get_ethtool_stats = pch_gbe_get_ethtool_stats, .get_sset_count = pch_gbe_get_sset_count, + .get_link_ksettings = pch_gbe_get_link_ksettings, + .set_link_ksettings = pch_gbe_set_link_ksettings, }; void pch_gbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index d461f419948e..5ae9681a2da7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2149,17 +2149,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } /** - * pch_gbe_get_stats - Get System Network Statistics - * @netdev: Network interface device structure - * Returns: The current stats - */ -static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev) -{ - /* only return the current stats */ - return &netdev->stats; -} - -/** * pch_gbe_set_multi - Multicast and Promiscuous mode set * @netdev: Network interface device structure */ @@ -2385,7 +2374,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) poll_end_flag = true; if (poll_end_flag) { - napi_complete(napi); + napi_complete_done(napi, work_done); pch_gbe_irq_enable(adapter); } @@ -2420,7 +2409,6 @@ static const struct net_device_ops pch_gbe_netdev_ops = { .ndo_open = pch_gbe_open, .ndo_stop = pch_gbe_stop, .ndo_start_xmit = pch_gbe_xmit_frame, - .ndo_get_stats = pch_gbe_get_stats, .ndo_set_mac_address = pch_gbe_set_mac, .ndo_tx_timeout = pch_gbe_tx_timeout, .ndo_change_mtu = 
pch_gbe_change_mtu, diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index baff744b560e..8b026dbf0d8d 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1811,21 +1811,23 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int hamachi_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct hamachi_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); - mii_ethtool_gset(&np->mii_if, ecmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return 0; } -static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int hamachi_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct hamachi_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); - res = mii_ethtool_sset(&np->mii_if, ecmd); + res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return res; } @@ -1845,10 +1847,10 @@ static u32 hamachi_get_link(struct net_device *dev) static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = hamachi_get_drvinfo, - .get_settings = hamachi_get_settings, - .set_settings = hamachi_set_settings, .nway_reset = hamachi_nway_reset, .get_link = hamachi_get_link, + .get_link_ksettings = hamachi_get_link_ksettings, + .set_link_ksettings = hamachi_set_link_ksettings, }; static const struct ethtool_ops ethtool_ops_no_mii = { diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index badfa1d562a4..49591d9c2e1b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget) pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); if (pkts < budget) { /* all done, no more packets present */ - napi_complete(napi); + napi_complete_done(napi, pkts); pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 3cfd10503446..c2e24afbaeb2 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -104,6 +104,7 @@ config QED_SRIOV config QEDE tristate "QLogic QED 25/40/100Gb Ethernet NIC" depends on QED + imply PTP_1588_CLOCK ---help--- This enables the support for ... 
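The w90p910, forcedeth, pch_gbe and hamachi hunks above all perform the same mechanical conversion from the legacy get_settings/set_settings ethtool hooks to get_link_ksettings/set_link_ksettings, translating the old u32 SUPPORTED_*/ADVERTISED_* masks at the boundary. A condensed sketch of the pattern is shown below; it is illustrative only, and struct example_priv with its fields is a placeholder rather than anything taken from the drivers in this series — only the ethtool conversion helpers and the ksettings layout are real kernel APIs.

/* Sketch of the legacy -> link_ksettings conversion pattern; example_priv
 * is hypothetical, wired up via .get_link_ksettings/.set_link_ksettings.
 */
struct example_priv {
	u32 supported;		/* legacy SUPPORTED_* mask kept by the driver */
	u32 advertising;	/* legacy ADVERTISED_* mask */
	u32 speed;
	u8 duplex;
	u8 autoneg;
};

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct example_priv *p = netdev_priv(dev);

	cmd->base.speed = p->speed;		/* was ethtool_cmd_speed_set() */
	cmd->base.duplex = p->duplex;
	cmd->base.autoneg = p->autoneg;
	cmd->base.port = PORT_MII;

	/* legacy u32 masks become link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->advertising);
	return 0;
}

static int example_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct example_priv *p = netdev_priv(dev);
	u32 advertising;

	/* and back to a legacy mask when the driver programs the PHY */
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	p->advertising = advertising;
	p->speed = cmd->base.speed;		/* was ethtool_cmd_speed() */
	p->duplex = cmd->base.duplex;
	p->autoneg = cmd->base.autoneg;
	return 0;
}

Keeping driver-internal state as legacy u32 masks and converting only at the ethtool boundary is the minimal-churn option these conversions take; none of the affected devices advertise link modes outside the legacy 32-bit mask, so nothing is lost in the round trip.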
@@ -113,4 +114,7 @@ config QED_RDMA config QED_ISCSI bool +config QED_FCOE + bool + endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index f9034467736c..3157f97dd782 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -96,69 +96,70 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) } static int -netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +netxen_nic_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); int check_sfp_module = 0; + u32 supported, advertising; /* read which mode */ if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | + advertising = (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = adapter->link_duplex; - ecmd->autoneg = adapter->link_autoneg; + cmd->base.speed = adapter->link_speed; + cmd->base.duplex = adapter->link_duplex; + cmd->base.autoneg = adapter->link_autoneg; } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { u32 val; val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); if (val == NETXEN_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; + supported = SUPPORTED_1000baseT_Full; + advertising = ADVERTISED_1000baseT_Full; } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; } if (netif_running(dev) && adapter->has_link_events) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->autoneg = adapter->link_autoneg; - ecmd->duplex = adapter->link_duplex; + cmd->base.speed = adapter->link_speed; + cmd->base.autoneg = adapter->link_autoneg; + cmd->base.duplex = adapter->link_duplex; goto skip; } - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { u16 pcifn = adapter->ahw.pci_func; val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); - ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * - P3_LINK_SPEED_VAL(pcifn, val)); + cmd->base.speed = P3_LINK_SPEED_MHZ * + P3_LINK_SPEED_VAL(pcifn, val); } else - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_DISABLE; } else return -EIO; skip: - ecmd->phy_address = adapter->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.phy_address = adapter->physical_port; switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: @@ -167,16 +168,16 @@ skip: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4: case 
NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = (adapter->ahw.board_type == + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + cmd->base.autoneg = (adapter->ahw.board_type == NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? (AUTONEG_DISABLE) : (adapter->link_autoneg); break; @@ -185,39 +186,39 @@ skip: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_XG_LOM: case NETXEN_BRDTYPE_P3_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_MII; + advertising |= ADVERTISED_MII; + cmd->base.port = PORT_MII; + cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + supported |= SUPPORTED_TP; check_sfp_module = netif_running(dev) && adapter->has_link_events; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_TP: if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= + cmd->base.autoneg = AUTONEG_DISABLE; + supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; check_sfp_module = netif_running(dev) && adapter->has_link_events; } else { - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= + supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; } break; default: @@ -232,31 +233,37 @@ skip: case LINKEVENT_MODULE_OPTICAL_SRLR: case LINKEVENT_MODULE_OPTICAL_LRM: case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; break; case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; break; default: - ecmd->port = -1; + cmd->base.port = -1; } } if (!netif_running(dev) || !adapter->ahw.linkup) { - ecmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } static int -netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +netxen_nic_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; int ret; if (adapter->ahw.port_type != NETXEN_NIC_GBE) @@ -265,16 +272,16 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (!(adapter->capabilities & 
NX_FW_CAPABILITY_GBE_LINK_CFG)) return -EOPNOTSUPP; - ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, - ecmd->autoneg); + ret = nx_fw_cmd_set_gbe_port(adapter, speed, cmd->base.duplex, + cmd->base.autoneg); if (ret == NX_RCODE_NOT_SUPPORTED) return -EOPNOTSUPP; else if (ret) return -EIO; adapter->link_speed = speed; - adapter->link_duplex = ecmd->duplex; - adapter->link_autoneg = ecmd->autoneg; + adapter->link_duplex = cmd->base.duplex; + adapter->link_autoneg = cmd->base.autoneg; if (!netif_running(dev)) return 0; @@ -931,8 +938,6 @@ netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, } const struct ethtool_ops netxen_nic_ethtool_ops = { - .get_settings = netxen_nic_get_settings, - .set_settings = netxen_nic_set_settings, .get_drvinfo = netxen_nic_get_drvinfo, .get_regs_len = netxen_nic_get_regs_len, .get_regs = netxen_nic_get_regs, @@ -954,4 +959,6 @@ const struct ethtool_ops netxen_nic_ethtool_ops = { .get_dump_flag = netxen_get_dump_flag, .get_dump_data = netxen_get_dump_data, .set_dump = netxen_set_dump, + .get_link_ksettings = netxen_nic_get_link_ksettings, + .set_link_ksettings = netxen_nic_set_link_ksettings, }; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 561fb94c7267..827de838389f 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -90,8 +90,8 @@ static irqreturn_t netxen_msix_intr(int irq, void *data); static void netxen_free_ip_list(struct netxen_adapter *, bool); static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void netxen_nic_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int netxen_nic_set_mac(struct net_device *netdev, void *p); /* PCI Device ID Table */ @@ -2302,8 +2302,8 @@ request_reset: clear_bit(__NX_RESETTING, &adapter->state); } -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void netxen_nic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct netxen_adapter *adapter = netdev_priv(netdev); @@ -2313,8 +2313,6 @@ static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, stats->tx_bytes = adapter->stats.txbytes; stats->rx_dropped = adapter->stats.rxdropped; stats->tx_dropped = adapter->stats.txdropped; - - return stats; } static irqreturn_t netxen_intr(int irq, void *data) @@ -2398,7 +2396,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); } @@ -3007,14 +3005,14 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, } -static struct bin_attribute bin_attr_crb = { +static const struct bin_attribute bin_attr_crb = { .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = netxen_sysfs_read_crb, .write = netxen_sysfs_write_crb, }; -static struct bin_attribute bin_attr_mem = { +static const struct bin_attribute bin_attr_mem = { .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = netxen_sysfs_read_mem, @@ -3143,7 +3141,7 @@ out: } -static struct bin_attribute bin_attr_dimm = { +static const struct 
bin_attribute bin_attr_dimm = { .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, .size = sizeof(struct netxen_dimm_cfg), .read = netxen_sysfs_read_dimm, @@ -3266,7 +3264,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter, cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); if (cur == NULL) goto out; - if (dev->priv_flags & IFF_802_1Q_VLAN) + if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); cur->master = !!netif_is_bond_master(dev); cur->ip_addr = ifa->ifa_address; @@ -3376,7 +3374,7 @@ static void netxen_config_master(struct net_device *dev, unsigned long event) !netif_is_bond_slave(dev)) { netxen_config_indev_addr(adapter, master, event); for_each_netdev_rcu(&init_net, slave) - if (slave->priv_flags & IFF_802_1Q_VLAN && + if (is_vlan_dev(slave) && vlan_dev_real_dev(slave) == master) netxen_config_indev_addr(adapter, slave, event); } @@ -3402,7 +3400,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } @@ -3447,7 +3445,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 729e43768e99..974929dcc74e 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -2,8 +2,9 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ - qed_selftest.o qed_dcbx.o qed_debug.o + qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o +qed-$(CONFIG_QED_FCOE) += qed_fcoe.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 44c184ebe3b0..00c17fa6545b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_H @@ -27,7 +51,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.10.9.20" +#define DRV_MODULE_VERSION "8.10.10.20" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 @@ -36,6 +60,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_WFQ_UNIT 100 #define ISCSI_BDQ_ID(_port_id) (_port_id) +#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2) #define QED_WID_SIZE (1024) #define QED_PF_DEMS_SIZE (4) @@ -143,6 +168,7 @@ struct qed_tunn_update_params { */ enum qed_pci_personality { QED_PCI_ETH, + QED_PCI_FCOE, QED_PCI_ISCSI, QED_PCI_ETH_ROCE, QED_PCI_DEFAULT /* default in shmem */ @@ -180,6 +206,7 @@ enum QED_FEATURE { QED_VF, QED_RDMA_CNQ, QED_VF_L2_QUE, + QED_FCOE_CQ, QED_MAX_FEATURES, }; @@ -197,6 +224,7 @@ enum QED_PORT_MODE { enum qed_dev_cap { QED_DEV_CAP_ETH, + QED_DEV_CAP_FCOE, QED_DEV_CAP_ISCSI, QED_DEV_CAP_ROCE, }; @@ -231,6 +259,10 @@ struct qed_hw_info { u32 part_num[4]; unsigned char hw_mac_addr[ETH_ALEN]; + u64 node_wwn; + u64 port_wwn; + + u16 num_fcoe_conns; struct qed_igu_info *p_igu_info; @@ -386,6 +418,7 @@ struct qed_hwfn { struct qed_ooo_info *p_ooo_info; struct qed_rdma_info *p_rdma_info; struct qed_iscsi_info *p_iscsi_info; + struct qed_fcoe_info *p_fcoe_info; struct qed_pf_params pf_params; bool b_rdma_enabled_in_prs; @@ -432,6 +465,8 @@ struct qed_hwfn { u8 dcbx_no_edpm; u8 db_bar_no_edpm; + /* p_ptp_ptt is valid for leading HWFN only */ + struct qed_ptt *p_ptp_ptt; struct qed_simd_fp_handler simd_proto_handler[64]; #ifdef CONFIG_QED_SRIOV @@ -594,11 +629,13 @@ struct qed_dev { u8 protocol; #define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH) +#define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE) /* Callbacks to protocol driver */ union { struct qed_common_cb_ops *common; struct qed_eth_cb_ops *eth; + struct qed_fcoe_cb_ops *fcoe; struct qed_iscsi_cb_ops *iscsi; } protocol_ops; void *ops_cookie; @@ -651,7 +688,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, #define OOO_LB_TC 9 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); -void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, + struct qed_ptt *p_ptt, + u32 min_pf_rate); void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 0c42c240b5cf..d42d03df751a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> @@ -66,12 +90,14 @@ union conn_context { struct core_conn_context core_ctx; struct eth_conn_context eth_ctx; struct iscsi_conn_context iscsi_ctx; + struct fcoe_conn_context fcoe_ctx; struct roce_conn_context roce_ctx; }; -/* TYPE-0 task context - iSCSI */ +/* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { struct iscsi_task_context iscsi_ctx; + struct fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ @@ -216,15 +242,22 @@ struct qed_cxt_mngr { static bool src_proto(enum protocol_type type) { return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_FCOE || type == PROTOCOLID_ROCE; } static bool tm_cid_proto(enum protocol_type type) { return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_FCOE || type == PROTOCOLID_ROCE; } +static bool tm_tid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_FCOE; +} + /* counts the iids for the CDU/CDUC ILT client configuration */ struct qed_cdu_iids { u32 pf_cids; @@ -283,6 +316,22 @@ static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr, iids->pf_cids += p_cfg->cid_count; iids->per_vf_cids += p_cfg->cids_per_vf; } + + if (tm_tid_proto(i)) { + struct qed_tid_seg *segs = p_cfg->tid_seg; + + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->pf_tids[j] += segs[j].count; + + /* The last array elelment is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. 
+ */ + iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } } iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN); @@ -1670,9 +1719,42 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) /* @@@TBD how to enable the scan for the VFs */ } +static void qed_prs_init_common(struct qed_hwfn *p_hwfn) +{ + if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) && + p_hwfn->pf_params.fcoe_pf_params.is_target) + STORE_RT_REG(p_hwfn, + PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0); +} + +static void qed_prs_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_conn_type_cfg *p_fcoe; + struct qed_tid_seg *p_tid; + + p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE]; + + /* If FCoE is active set the MAX OX_ID (tid) in the Parser */ + if (!p_fcoe->cid_count) + return; + + p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG]; + if (p_hwfn->pf_params.fcoe_pf_params.is_target) { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET, + p_tid->count); + } else { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET, + p_tid->count); + } +} + void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) { qed_cdu_init_common(p_hwfn); + qed_prs_init_common(p_hwfn); } void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) @@ -1684,6 +1766,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) qed_ilt_init_pf(p_hwfn); qed_src_init_pf(p_hwfn); qed_tm_init_pf(p_hwfn); + qed_prs_init_pf(p_hwfn); } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, @@ -1861,6 +1944,27 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) p_params->num_cons, 1); break; } + case QED_PCI_FCOE: + { + struct qed_fcoe_pf_params *p_params; + + p_params = &p_hwfn->pf_params.fcoe_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_FCOE, + p_params->num_cons, + 0); + + qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE, + QED_CXT_FCOE_TID_SEG, 0, + p_params->num_tasks, true); + } else { + DP_INFO(p_hwfn->cdev, + "Fcoe personality used without setting params!\n"); + } + break; + } case QED_PCI_ISCSI: { struct qed_iscsi_pf_params *p_params; @@ -1903,6 +2007,10 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, /* Verify the personality */ switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; case QED_PCI_ISCSI: proto = PROTOCOLID_ISCSI; seg = QED_CXT_ISCSI_TID_SEG; @@ -2191,15 +2299,19 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli; - struct qed_ilt_cli_blk *p_seg; struct qed_tid_seg *p_seg_info; - u32 proto, seg; - u32 total_lines; - u32 tid_size, ilt_idx; + struct qed_ilt_cli_blk *p_seg; u32 num_tids_per_block; + u32 tid_size, ilt_idx; + u32 total_lines; + u32 proto, seg; /* Verify the personality */ switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; case QED_PCI_ISCSI: proto = PROTOCOLID_ISCSI; seg = QED_CXT_ISCSI_TID_SEG; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 2b8bdaa77800..8b010324268a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main 
directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_CXT_H @@ -67,6 +91,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, #define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI #define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE +#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE enum qed_cxt_elem_type { QED_ELEM_CXT, QED_ELEM_SRQ, @@ -180,4 +205,6 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 #define QED_CTX_FL_MEM 1 +int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, + u32 tid, u8 ctx_type, void **task_ctx); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index a4789a93b692..5bd36a4a8fcd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> @@ -408,7 +432,6 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, return rc; } -#ifdef CONFIG_DCB static void qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn, struct qed_dcbx_app_prio *p_prio, @@ -725,7 +748,6 @@ qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return 0; } -#endif static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -840,6 +862,15 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, return rc; } +void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) +{ + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + void *cookie = hwfn->cdev->ops_cookie; + + if (cookie && op->dcbx_aen) + op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type); +} + /* Read updated MIB. * Reconfigure QM and invoke PF update ramrod command if operational MIB * change is detected. @@ -866,6 +897,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, qed_sp_pf_update(p_hwfn); } } + qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type); + qed_dcbx_aen(p_hwfn, type); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 9ba681643d05..0fabe97f998d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_DCBX_H @@ -33,7 +57,6 @@ struct qed_dcbx_app_data { u8 tc; /* Traffic Class */ }; -#ifdef CONFIG_DCB #define QED_DCBX_VERSION_DISABLED 0 #define QED_DCBX_VERSION_IEEE 1 #define QED_DCBX_VERSION_CEE 2 @@ -49,7 +72,6 @@ struct qed_dcbx_set { struct qed_dcbx_admin_params config; u32 ver_num; }; -#endif struct qed_dcbx_results { bool dcbx_enabled; @@ -73,9 +95,8 @@ struct qed_dcbx_info { struct qed_dcbx_results results; struct dcbx_mib operational; struct dcbx_mib remote; -#ifdef CONFIG_DCB struct qed_dcbx_set set; -#endif + struct qed_dcbx_get get; u8 dcbx_cap; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3b2250021c5f..e2a081ceaf52 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include <linux/types.h> @@ -25,6 +49,7 @@ #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_dev_api.h" +#include "qed_fcoe.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" @@ -148,6 +173,9 @@ void qed_resc_free(struct qed_dev *cdev) #ifdef CONFIG_QED_LL2 qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); @@ -409,6 +437,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) int qed_resc_alloc(struct qed_dev *cdev) { struct qed_iscsi_info *p_iscsi_info; + struct qed_fcoe_info *p_fcoe_info; struct qed_ooo_info *p_ooo_info; #ifdef CONFIG_QED_LL2 struct qed_ll2_info *p_ll2_info; @@ -515,6 +544,14 @@ int qed_resc_alloc(struct qed_dev *cdev) p_hwfn->p_ll2_info = p_ll2_info; } #endif + + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + p_fcoe_info = qed_fcoe_alloc(p_hwfn); + if (!p_fcoe_info) + goto alloc_no_mem; + p_hwfn->p_fcoe_info = p_fcoe_info; + } + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { p_iscsi_info = qed_iscsi_alloc(p_hwfn); if (!p_iscsi_info) @@ -578,6 +615,9 @@ void qed_resc_setup(struct qed_dev *cdev) if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); @@ -873,7 +913,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Either EDPM is mandatory, or we are attempting to allocate a * WID per CPU. */ - n_cpus = num_active_cpus(); + n_cpus = num_present_cpus(); rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); } @@ -970,7 +1010,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Protocl Configuration */ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); - STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, + (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 
1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); /* Cleanup chip from previous driver if such remains exist */ @@ -1002,8 +1043,16 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* send function start command */ rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, allow_npar_tx_switch); - if (rc) + if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); + return rc; + } + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); + qed_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); + } } return rc; } @@ -1763,8 +1812,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -1910,6 +1959,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) __set_bit(QED_DEV_CAP_ETH, &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) + __set_bit(QED_DEV_CAP_FCOE, + &p_hwfn->hw_info.device_capabilities); if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) __set_bit(QED_DEV_CAP_ISCSI, &p_hwfn->hw_info.device_capabilities); @@ -2647,6 +2699,177 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); } +int +qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, enum qed_llh_port_filter_type_t type) +{ + u32 high = 0, low = 0, en; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return 0; + + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + high = source_port_or_eth_type; + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + case QED_LLH_FILTER_UDP_SRC_PORT: + low = source_port_or_eth_type << 16; + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + case QED_LLH_FILTER_UDP_DEST_PORT: + low = dest_port; + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + low = (source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(p_hwfn, + "Non valid LLH protocol filter type %d\n", type); + return -EINVAL; + } + /* Find a free entry and utilize it */ + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + en = qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); + if (en) + continue; + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), low); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), high); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 1 << type); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); + break; + } + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { + DP_NOTICE(p_hwfn, + "Failed to find an empty LLH filter to utilize\n"); + return -EINVAL; + } + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "ETH type %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + DP_VERBOSE(p_hwfn, 
NETIF_MSG_HW, + "TCP src port %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_UDP_SRC_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP src port %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP dst port %x is added at %d\n", dest_port, i); + break; + case QED_LLH_FILTER_UDP_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP dst port %x is added at %d\n", dest_port, i); + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP src/dst ports %x/%x are added at %d\n", + source_port_or_eth_type, dest_port, i); + break; + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP src/dst ports %x/%x are added at %d\n", + source_port_or_eth_type, dest_port, i); + break; + } + return 0; +} + +void +qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type) +{ + u32 high = 0, low = 0; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return; + + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + high = source_port_or_eth_type; + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + case QED_LLH_FILTER_UDP_SRC_PORT: + low = source_port_or_eth_type << 16; + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + case QED_LLH_FILTER_UDP_DEST_PORT: + low = dest_port; + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + low = (source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(p_hwfn, + "Non valid LLH protocol filter type %d\n", type); + return; + } + + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (!qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32))) + continue; + if (!qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32))) + continue; + if (!(qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32)) & BIT(type))) + continue; + if (qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32)) != low) + continue; + if (qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32)) != high) + continue; + + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), 0); + break; + } + + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) + DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); +} + static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr, void *p_eth_qzone, size_t eth_qzone_size, u8 timeset) @@ -2975,7 +3198,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) } /* API to configure WFQ from mcp link change */ -void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, + struct qed_ptt *p_ptt, u32 min_pf_rate) { int i; @@ -2989,8 +3213,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - __qed_configure_vp_wfq_on_link_change(p_hwfn, 
- p_hwfn->p_dpc_ptt, + __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, min_pf_rate); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index b6711c106597..6812003411cd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_DEV_API_H @@ -329,6 +353,48 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *p_filter); +enum qed_llh_port_filter_type_t { + QED_LLH_FILTER_ETHERTYPE, + QED_LLH_FILTER_TCP_SRC_PORT, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_PORT, + QED_LLH_FILTER_UDP_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT +}; + +/** + * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + * @param type - type of filters and comparing + */ +int +qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type); + +/** + * @brief qed_llh_remove_protocol_filter - remove a protocol filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + * @param type - type of filters and comparing + */ +void +qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type); + /** * *@brief Cleanup of previous driver remains prior to load * diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c new file mode 100644 index 000000000000..cbc81412174f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -0,0 +1,1014 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/version.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#define __PREVENT_DUMP_MEM_ARR__ +#define __PREVENT_PXP_GLOBAL_WIN__ +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_fcoe.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include <linux/qed/qed_fcoe_if.h> + +struct qed_fcoe_conn { + struct list_head list_entry; + bool free_on_delete; + + u16 conn_id; + u32 icid; + u32 fw_cid; + u8 layer_code; + + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + dma_addr_t xferq_pbl_addr; + void *xferq_pbl_addr_virt_addr; + dma_addr_t xferq_addr[4]; + void *xferq_addr_virt_addr[4]; + dma_addr_t confq_pbl_addr; + void *confq_pbl_addr_virt_addr; + dma_addr_t confq_addr[2]; + void *confq_addr_virt_addr[2]; + + dma_addr_t terminate_params; + + u16 dst_mac_addr_lo; + u16 dst_mac_addr_mid; + u16 dst_mac_addr_hi; + u16 src_mac_addr_lo; + u16 src_mac_addr_mid; + u16 src_mac_addr_hi; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + u16 physical_q0; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +static int +qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_fcoe_pf_params *fcoe_pf_params = NULL; + struct fcoe_init_ramrod_params *p_ramrod = NULL; + struct fcoe_init_func_ramrod_data *p_data; + struct fcoe_conn_context *p_cxt = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + struct qed_cxt_info cxt_info; + u32 dummy_cid; + int rc = 0; + u16 tmp; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_init; + p_data = &p_ramrod->init_ramrod_data; + fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; + + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); + tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); + p_data->sq_num_pages_in_pbl = tmp; + + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); + if (rc) + return rc; + + cxt_info.iid = dummy_cid; + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); + if (rc) { + DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", + dummy_cid); + return rc; + } + p_cxt = cxt_info.p_cxt; + SET_FIELD(p_cxt->tstorm_ag_context.flags3, + TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + + fcoe_pf_params->dummy_icid = (u16)dummy_cid; + + tmp = cpu_to_le16(fcoe_pf_params->num_tasks); + p_data->func_params.num_tasks = tmp; + p_data->func_params.log_page_size = fcoe_pf_params->log_page_size; + p_data->func_params.debug_mode = 
fcoe_pf_params->debug_mode; + + DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr, + fcoe_pf_params->glbl_q_params_addr); + + tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries); + p_data->q_params.cq_num_entries = tmp; + + tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries); + p_data->q_params.cmdq_num_entries = tmp; + + tmp = fcoe_pf_params->num_cqs; + p_data->q_params.num_queues = (u8)tmp; + + tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_data->q_params.queue_relative_offset = (u8)tmp; + + for (i = 0; i < fcoe_pf_params->num_cqs; i++) { + tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id); + p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; + } + + p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi; + p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi; + + p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id); + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ]; + tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]; + p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]; + p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; + tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; + p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; + p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->rq_buffer_size; + p_data->q_params.rq_buffer_size = cpu_to_le16(tmp); + + if (fcoe_pf_params->is_target) { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); + } else { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + } + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + return rc; +} + +static int +qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL; + struct fcoe_conn_offload_ramrod_data *p_data; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 pq_id = 0, tmp; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_ofld; + p_data = &p_ramrod->offload_ramrod_data; + + /* Transmission PQ is the first of the PF */ + pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL); + p_conn->physical_q0 = cpu_to_le16(pq_id); + p_data->physical_q0 = cpu_to_le16(pq_id); + + p_data->conn_id = cpu_to_le16(p_conn->conn_id); + DMA_REGPAIR_LE(p_data->sq_pbl_addr, 
p_conn->sq_pbl_addr); + DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr); + DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr); + DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr); + DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]); + DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]); + + DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr); + DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]); + DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]); + + p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo); + p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid); + p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi); + p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo); + p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid); + p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi); + + tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len); + p_data->tx_max_fc_pay_len = tmp; + tmp = cpu_to_le16(p_conn->e_d_tov_timer_val); + p_data->e_d_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rec_tov_timer_val); + p_data->rec_rr_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len); + p_data->rx_max_fc_pay_len = tmp; + + p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag); + p_data->s_id.addr_hi = p_conn->s_id.addr_hi; + p_data->s_id.addr_mid = p_conn->s_id.addr_mid; + p_data->s_id.addr_lo = p_conn->s_id.addr_lo; + p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3; + p_data->d_id.addr_hi = p_conn->d_id.addr_hi; + p_data->d_id.addr_mid = p_conn->d_id.addr_mid; + p_data->d_id.addr_lo = p_conn->d_id.addr_lo; + p_data->flags = p_conn->flags; + p_data->def_q_idx = p_conn->def_q_idx; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_terminate; + DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr, + p_conn->terminate_params); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u32 active_segs = 0; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK); + active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG); + qed_wr(p_hwfn, p_ptt, 
TM_REG_PF_ENABLE_TASK, active_segs); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + void *p_addr; + u32 i; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + if (!list_empty(&p_hwfn->p_fcoe_info->free_list)) + p_conn = + list_first_entry(&p_hwfn->p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + *p_out_conn = p_conn; + return 0; + } + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_pbl_xferq; + p_conn->xferq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->xferq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->xferq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i]; + } + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->confq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_confq; + p_conn->confq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->confq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i]; + } + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + return 0; + +nomem_confq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) + if (p_conn->confq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); +nomem_xferq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_pbl_addr_virt_addr, + p_conn->xferq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) + if (p_conn->xferq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); +nomem_pbl_xferq: + kfree(p_conn); + return -ENOMEM; +} + +static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + u32 i; + + if (!p_conn) + return; + + if (p_conn->confq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + if (!p_conn->confq_addr_virt_addr[i]) + continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); + } + + if (p_conn->xferq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_pbl_addr_virt_addr, + p_conn->xferq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + if (!p_conn->xferq_addr_virt_addr[i]) + 
continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); + } + kfree(p_conn); +} + +static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); +} + +static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + + TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); +} + +struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_fcoe_info *p_fcoe_info; + + /* Allocate LL2's set struct */ + p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL); + if (!p_fcoe_info) { + DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n"); + return NULL; + } + INIT_LIST_HEAD(&p_fcoe_info->free_list); + return p_fcoe_info; +} + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +{ + struct fcoe_task_context *p_task_ctx = NULL; + int rc; + u32 i; + + spin_lock_init(&p_fcoe_info->lock); + for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { + rc = qed_cxt_get_task_ctx(p_hwfn, i, + QED_CTX_WORKING_MEM, + (void **)&p_task_ctx); + if (rc) + continue; + + memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); + SET_FIELD(p_task_ctx->timer_context.logical_client_0, + TIMERS_CONTEXT_VALIDLC0, 1); + SET_FIELD(p_task_ctx->timer_context.logical_client_1, + TIMERS_CONTEXT_VALIDLC1, 1); + SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + } +} + +void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +{ + struct qed_fcoe_conn *p_conn = NULL; + + if (!p_fcoe_info) + return; + + while (!list_empty(&p_fcoe_info->free_list)) { + p_conn = list_first_entry(&p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (!p_conn) + break; + list_del(&p_conn->list_entry); + qed_fcoe_free_connection(p_hwfn, p_conn); + } + + kfree(p_fcoe_info); +} + +static int +qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_in_conn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + if (rc) + return rc; + + /* Use input connection [if provided] or allocate a new one */ + if (p_in_conn) { + p_conn = p_in_conn; + } else { + rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn); + if (rc) { + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + return rc; + } + } + + p_conn->icid = icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + *p_out_conn = p_conn; + + return rc; +} + +static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); 
+} + +static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_rx_stat tstats; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); + + p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt); + p_stats->fcoe_rx_data_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt); + p_stats->fcoe_rx_xfer_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt); + p_stats->fcoe_rx_other_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt); + + p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt); + p_stats->fcoe_silent_drop_pkt_rq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt); + p_stats->fcoe_silent_drop_pkt_crc_error_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt); + p_stats->fcoe_silent_drop_pkt_task_invalid_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt); + p_stats->fcoe_silent_drop_total_pkt_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt); +} + +static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_tx_stat pstats; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt); + p_stats->fcoe_tx_data_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt); + p_stats->fcoe_tx_xfer_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt); + p_stats->fcoe_tx_other_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt); +} + +static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, + struct qed_fcoe_stats *p_stats) +{ + struct qed_ptt *p_ptt; + + memset(p_stats, 0, sizeof(*p_stats)); + + p_ptt = qed_ptt_acquire(p_hwfn); + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EINVAL; + } + + _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats); + _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +struct qed_hash_fcoe_con { + struct hlist_node node; + struct qed_fcoe_conn *con; +}; + +static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + + info->primary_dbq_rq_addr = + qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); + info->secondary_bdq_rq_addr = + qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + + return rc; +} + +static void qed_register_fcoe_ops(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.fcoe = ops; + cdev->ops_cookie = cookie; +} + +static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_fcoe_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || (hash_con->con->icid != handle)) + return NULL; + + return hash_con; +} + +static int qed_fcoe_stop(struct qed_dev *cdev) +{ + int rc; + + if (!(cdev->flags & 
QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "fcoe already stopped\n"); + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop fcoe - not all connections were returned\n"); + return -EINVAL; + } + + /* Stop the fcoe */ + rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + + return rc; +} + +static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks) +{ + int rc; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "fcoe already started;\n"); + return 0; + } + + rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_NOTICE(cdev, "Failed to start fcoe\n"); + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (tasks) { + struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info), + GFP_ATOMIC); + + if (!tid_info) { + DP_NOTICE(cdev, + "Failed to allocate tasks information\n"); + qed_fcoe_stop(cdev); + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_fcoe_stop(cdev); + kfree(tid_info); + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_FCOE * sizeof(u8 *)); + + kfree(tid_info); + } + + return 0; +} + +static int qed_fcoe_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_fcoe_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to allocate hashed connection\n"); + return -ENOMEM; + } + + /* Acquire the connection */ + rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL, + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + return rc; + } + + /* Added the connection to hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + + if (p_doorbell) + *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_fcoe_con *hash_con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_fcoe_offload_conn(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->sq_curr_page_addr = conn_info->sq_curr_page_addr; + con->sq_next_page_addr = conn_info->sq_next_page_addr; + con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len; + con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val; + con->rec_tov_timer_val = conn_info->rec_tov_timer_val; + 
con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len; + con->vlan_tag = conn_info->vlan_tag; + con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3; + con->flags = conn_info->flags; + con->def_q_idx = conn_info->def_q_idx; + + con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) | + conn_info->src_mac[4]; + con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) | + conn_info->src_mac[2]; + con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) | + conn_info->src_mac[0]; + con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) | + conn_info->dst_mac[4]; + con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) | + conn_info->dst_mac[2]; + con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) | + conn_info->dst_mac[0]; + + con->s_id.addr_hi = conn_info->s_id.addr_hi; + con->s_id.addr_mid = conn_info->s_id.addr_mid; + con->s_id.addr_lo = conn_info->s_id.addr_lo; + con->d_id.addr_hi = conn_info->d_id.addr_hi; + con->d_id.addr_mid = conn_info->d_id.addr_mid; + con->d_id.addr_lo = conn_info->d_id.addr_lo; + + return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_destroy_conn(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + con->terminate_params = terminate_params; + + return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) +{ + return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats); +} + +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats) +{ + struct qed_fcoe_stats proto_stats; + + /* Retrieve FW statistics */ + memset(&proto_stats, 0, sizeof(proto_stats)); + if (qed_fcoe_stats(cdev, &proto_stats)) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, + "Failed to collect FCoE statistics\n"); + return; + } + + /* Translate FW statistics into struct */ + stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt + + proto_stats.fcoe_rx_xfer_pkt_cnt + + proto_stats.fcoe_rx_other_pkt_cnt; + stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt + + proto_stats.fcoe_tx_xfer_pkt_cnt + + proto_stats.fcoe_tx_other_pkt_cnt; + stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt; + + /* Request protocol driver to fill-in the rest */ + if (cdev->protocol_ops.fcoe && cdev->ops_cookie) { + struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe; + void *cookie = cdev->ops_cookie; + + if (ops->get_login_failures) + stats->login_failure = ops->get_login_failures(cookie); + } +} + +static const struct qed_fcoe_ops qed_fcoe_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_fcoe_dev_info, + .start = &qed_fcoe_start, + .stop = &qed_fcoe_stop, + .register_ops = &qed_register_fcoe_ops, + .acquire_conn = &qed_fcoe_acquire_conn, + .release_conn = &qed_fcoe_release_conn, + .offload_conn = &qed_fcoe_offload_conn, + .destroy_conn = &qed_fcoe_destroy_conn, + .get_stats = &qed_fcoe_stats, +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void) +{ + return &qed_fcoe_ops_pass; +} +EXPORT_SYMBOL(qed_get_fcoe_ops); + +void qed_put_fcoe_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_fcoe_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h 
b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h new file mode 100644 index 000000000000..472af34a171d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -0,0 +1,87 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _QED_FCOE_H +#define _QED_FCOE_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_fcoe_if.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +struct qed_fcoe_info { + spinlock_t lock; /* Connection resources. 
*/ + struct list_head free_list; +}; + +#if IS_ENABLED(CONFIG_QED_FCOE) +struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn); + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); + +void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats); +#else /* CONFIG_QED_FCOE */ +static inline struct qed_fcoe_info * +qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + return NULL; +} + +static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn, + struct qed_fcoe_info *p_fcoe_info) +{ +} + +static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn, + struct qed_fcoe_info *p_fcoe_info) +{ +} + +static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats) +{ +} +#endif /* CONFIG_QED_FCOE */ + +#ifdef CONFIG_QED_LL2 +extern const struct qed_common_ops qed_common_ops_pass; +extern const struct qed_ll2_ops qed_ll2_ops_pass; +#endif + +#endif /* _QED_FCOE_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 785ab03683eb..37c2bfb663bb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_HSI_H @@ -19,10 +43,12 @@ #include <linux/qed/common_hsi.h> #include <linux/qed/storage_common.h> #include <linux/qed/tcp_common.h> +#include <linux/qed/fcoe_common.h> #include <linux/qed/eth_common.h> #include <linux/qed/iscsi_common.h> #include <linux/qed/rdma_common.h> #include <linux/qed/roce_common.h> +#include <linux/qed/qed_fcoe_if.h> struct qed_hwfn; struct qed_ptt; @@ -913,7 +939,7 @@ struct mstorm_vf_zone { enum personality_type { BAD_PERSONALITY_TYP, PERSONALITY_ISCSI, - PERSONALITY_RESERVED2, + PERSONALITY_FCOE, PERSONALITY_RDMA_AND_ETH, PERSONALITY_RESERVED3, PERSONALITY_CORE, @@ -3449,6 +3475,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[44].base + ((pf_id) * IRO[44].m1)) static const struct iro iro_arr[47] = { {0x0, 0x0, 0x0, 0x0, 0x8}, @@ -7383,6 +7413,769 @@ struct ystorm_roce_resp_conn_ag_ctx { __le32 reg3; }; +struct ystorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 cos; + u8 conf_version; + u8 eth_hdr_size; + __le16 stat_ram_addr; + __le16 mtu; + __le16 max_fc_payload_len; + __le16 tx_max_fc_pay_len; + u8 fcp_cmd_size; + u8 fcp_rsp_size; + __le16 mss; + struct regpair reserved; + u8 protection_info_flags; +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 + u8 dst_protection_per_mss; + u8 src_protection_per_mss; + u8 ptu_log_page_size; + u8 flags; +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 + u8 fcp_xfer_size; + u8 reserved3[2]; +}; + +struct fcoe_vlan_fields { + __le16 fields; +#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 +#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 +}; + +union fcoe_vlan_field_union { + struct fcoe_vlan_fields fields; + __le16 val; +}; + +union fcoe_vlan_vif_field_union { + union fcoe_vlan_field_union vlan; + __le16 vif; +}; + +struct pstorm_fcoe_eth_context_section { + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; + union fcoe_vlan_vif_field_union vif_outer_vlan; + __le16 vif_outer_eth_type; + union fcoe_vlan_vif_field_union inner_vlan; + __le16 inner_eth_type; +}; + +struct pstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 cos; + u8 conf_version; + u8 rsrv; + __le16 stat_ram_addr; + __le16 mss; + struct regpair abts_cleanup_addr; + struct pstorm_fcoe_eth_context_section eth; + u8 sid_2; + u8 sid_1; + u8 sid_0; + u8 flags; +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 +#define 
PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4 + u8 did_2; + u8 did_1; + u8 did_0; + u8 src_mac_index; + __le16 rec_rr_tov_val; + u8 q_relative_offset; + u8 reserved1; +}; + +struct xstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 src_mac_index; + u8 conf_version; + u8 cached_wqes_avail; + __le16 stat_ram_addr; + u8 flags; +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 + u8 cached_wqes_offset; + u8 reserved2; + u8 eth_hdr_size; + u8 seq_id; + u8 max_conc_seqs; + __le16 num_pages_in_pbl; + __le16 reserved; + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 mtu; + __le16 tx_max_fc_pay_len; + __le16 max_fc_payload_len; + __le16 min_frame_size; + __le16 sq_pbl_next_index; + __le16 respq_pbl_next_index; + u8 fcp_cmd_byte_credit; + u8 fcp_rsp_byte_credit; + __le16 protection_info; +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 + __le16 xferq_pbl_next_index; + __le16 page_size; + u8 mid_seq; + u8 fcp_xfer_byte_credit; + u8 reserved1[2]; + struct fcoe_wqe cached_wqes[16]; +}; + +struct xstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 fcoe_state; + u8 flags0; +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define 
XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 + u8 flags7; +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 + u8 flags12; +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define 
XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 sq_cons; + __le16 sq_prod; + __le16 xferq_prod; + __le16 xferq_cons; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 remain_io; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 respq_prod; + __le16 respq_cons; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; +}; + +struct ustorm_fcoe_conn_st_ctx { + struct regpair respq_pbl_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + __le16 respq_prod; + u8 reserved[2]; +}; + +struct tstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 fcoe_state; + u8 flags0; +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 + u8 flags1; +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define 
TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; +}; + +struct ustorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define 
USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct tstorm_fcoe_conn_st_ctx { + __le16 stat_ram_addr; + __le16 rx_max_fc_payload_len; + __le16 e_d_tov_val; + u8 flags; +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 + u8 timers_cleanup_invocation_cnt; + __le32 reserved1[2]; + __le32 dst_mac_address_bytes0to3; + __le16 dst_mac_address_bytes4to5; + __le16 ramrod_echo; + u8 flags1; +#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 +#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 + u8 q_relative_offset; + u8 bdq_resource_id; + u8 reserved0[5]; +}; + +struct mstorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define 
MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct fcoe_mstorm_fcoe_conn_st_ctx_fp { + __le16 xfer_prod; + __le16 reserved1; + u8 protection_info; +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_SHIFT 1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_MASK 0x3F +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_SHIFT 2 + u8 q_relative_offset; + u8 reserved2[2]; +}; + +struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { + __le16 conn_id; + __le16 stat_ram_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + __le16 unsolicited_cq_count; + __le16 cmdq_count; + u8 bdq_resource_id; + u8 reserved0[3]; + struct regpair xferq_pbl_addr; + struct regpair reserved1; + struct regpair reserved2[3]; +}; + +struct mstorm_fcoe_conn_st_ctx { + struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp; + struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp; +}; + +struct fcoe_conn_context { + struct ystorm_fcoe_conn_st_ctx ystorm_st_context; + struct pstorm_fcoe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_fcoe_conn_st_ctx xstorm_st_context; + struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct regpair xstorm_ag_padding[6]; + struct ustorm_fcoe_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct tstorm_fcoe_conn_st_ctx tstorm_st_context; + struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_conn_st_ctx mstorm_st_context; +}; + +struct fcoe_conn_offload_ramrod_params { + struct fcoe_conn_offload_ramrod_data offload_ramrod_data; +}; + +struct fcoe_conn_terminate_ramrod_params { + struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data; +}; + +enum fcoe_event_type { + FCOE_EVENT_INIT_FUNC, + FCOE_EVENT_DESTROY_FUNC, + FCOE_EVENT_STAT_FUNC, + FCOE_EVENT_OFFLOAD_CONN, + FCOE_EVENT_TERMINATE_CONN, + FCOE_EVENT_ERROR, + MAX_FCOE_EVENT_TYPE +}; + +struct fcoe_init_ramrod_params { + struct fcoe_init_func_ramrod_data init_ramrod_data; +}; + +enum fcoe_ramrod_cmd_id { + FCOE_RAMROD_CMD_ID_INIT_FUNC, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + FCOE_RAMROD_CMD_ID_STAT_FUNC, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + MAX_FCOE_RAMROD_CMD_ID +}; + +struct fcoe_stat_ramrod_params { + struct fcoe_stat_ramrod_data stat_ramrod_data; +}; + +struct ystorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + struct ystorm_iscsi_conn_st_ctx { __le32 reserved[4]; }; @@ -8411,6 +9204,7 @@ struct public_func { #define FUNC_MF_CFG_PROTOCOL_SHIFT 4 #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 #define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 #define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 @@ -8505,6 +9299,13 @@ struct lan_stats_stc { u32 rserved; }; +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + struct ocbb_data_stc { u32 ocbb_host_addr; u32 ocsd_host_addr; @@ -8578,6 +9379,7 @@ union drv_union_data { struct drv_version_stc drv_version; struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; struct ocbb_data_stc ocbb_info; struct temperature_status_stc temp_info; struct resource_info resource; @@ -8881,6 +9683,7 @@ struct nvm_cfg1_glob { u32 misc_sig; u32 device_capabilities; #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 u32 power_dissipated; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 6e4fae9b1430..899cad7f97ea 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> @@ -817,6 +841,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, if (pq_id > p_hwfn->qm_info.num_pf_rls) pq_id = p_hwfn->qm_info.offload_pq; break; + case PROTOCOLID_FCOE: + pq_id = p_hwfn->qm_info.offload_pq; + break; default: pq_id = 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index d01557092868..9277264d2e65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_HW_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 23e455f22adc..d891a6852695 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d567ba94c8d1..243b64e0d4dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index 1e832049983d..555dd086796d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_INIT_OPS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index c68dbf7092b1..84310b60849b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 0948be64dc78..0ae0bb4593ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_INT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 17a70122df05..3a44d6b395fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 67c25f3db4d5..20c187f4ed0b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_ISCSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 6a3727c4c0c6..df932be5a4e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include <linux/types.h> @@ -74,6 +98,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid->cid = cid; p_cid->vf_qid = vf_qid; p_cid->rel = *p_params; + p_cid->p_owner = p_hwfn; /* Don't try calculating the absolute indices for VFs */ if (IS_VF(p_hwfn->cdev)) { @@ -189,6 +214,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, p_ramrod->vport_id = abs_vport_id; p_ramrod->mtu = cpu_to_le16(p_params->mtu); + p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; p_ramrod->drop_ttl0_en = p_params->drop_ttl0; p_ramrod->untagged = p_params->only_untagged; @@ -248,76 +274,103 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, static int qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, - struct qed_rss_params *p_params) + struct qed_rss_params *p_rss) { - struct eth_vport_rss_config *rss = &p_ramrod->rss_config; - u16 abs_l2_queue = 0, capabilities = 0; - int rc = 0, i; + struct eth_vport_rss_config *p_config; + u16 capabilities = 0; + int i, table_size; + int rc = 0; - if (!p_params) { + if (!p_rss) { p_ramrod->common.update_rss_flg = 0; return rc; } + p_config = &p_ramrod->rss_config; - BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != - ETH_RSS_IND_TABLE_ENTRIES_NUM); + BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM); - rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id); + rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); if (rc) return rc; - p_ramrod->common.update_rss_flg = p_params->update_rss_config; - rss->update_rss_capabilities = p_params->update_rss_capabilities; - rss->update_rss_ind_table = p_params->update_rss_ind_table; - rss->update_rss_key = p_params->update_rss_key; + p_ramrod->common.update_rss_flg = p_rss->update_rss_config; + p_config->update_rss_capabilities = p_rss->update_rss_capabilities; + p_config->update_rss_ind_table = p_rss->update_rss_ind_table; + p_config->update_rss_key = p_rss->update_rss_key; - rss->rss_mode = p_params->rss_enable ? - ETH_VPORT_RSS_MODE_REGULAR : - ETH_VPORT_RSS_MODE_DISABLED; + p_config->rss_mode = p_rss->rss_enable ? 
+ ETH_VPORT_RSS_MODE_REGULAR : + ETH_VPORT_RSS_MODE_DISABLED; SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV4)); + !!(p_rss->rss_caps & QED_RSS_IPV4)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV6)); + !!(p_rss->rss_caps & QED_RSS_IPV6)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV4_TCP)); + !!(p_rss->rss_caps & QED_RSS_IPV4_TCP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV6_TCP)); + !!(p_rss->rss_caps & QED_RSS_IPV6_TCP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV4_UDP)); + !!(p_rss->rss_caps & QED_RSS_IPV4_UDP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, - !!(p_params->rss_caps & QED_RSS_IPV6_UDP)); - rss->tbl_size = p_params->rss_table_size_log; + !!(p_rss->rss_caps & QED_RSS_IPV6_UDP)); + p_config->tbl_size = p_rss->rss_table_size_log; - rss->capabilities = cpu_to_le16(capabilities); + p_config->capabilities = cpu_to_le16(capabilities); DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", p_ramrod->common.update_rss_flg, - rss->rss_mode, rss->update_rss_capabilities, - capabilities, rss->update_rss_ind_table, - rss->update_rss_key); + p_config->rss_mode, + p_config->update_rss_capabilities, + p_config->capabilities, + p_config->update_rss_ind_table, p_config->update_rss_key); - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - rc = qed_fw_l2_queue(p_hwfn, - (u8)p_params->rss_ind_table[i], - &abs_l2_queue); - if (rc) - return rc; + table_size = min_t(int, QED_RSS_IND_TABLE_SIZE, + 1 << p_config->tbl_size); + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i]; + + if (!p_queue) + return -EINVAL; - rss->indirection_table[i] = cpu_to_le16(abs_l2_queue); - DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n", - i, rss->indirection_table[i]); + p_config->indirection_table[i] = + cpu_to_le16(p_queue->abs.queue_id); + } + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "Configured RSS indirection table [%d entries]:\n", + table_size); + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFUP, + "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", + le16_to_cpu(p_config->indirection_table[i]), + le16_to_cpu(p_config->indirection_table[i + 1]), + le16_to_cpu(p_config->indirection_table[i + 2]), + le16_to_cpu(p_config->indirection_table[i + 3]), + le16_to_cpu(p_config->indirection_table[i + 4]), + le16_to_cpu(p_config->indirection_table[i + 5]), + le16_to_cpu(p_config->indirection_table[i + 6]), + le16_to_cpu(p_config->indirection_table[i + 7]), + le16_to_cpu(p_config->indirection_table[i + 8]), + le16_to_cpu(p_config->indirection_table[i + 9]), + le16_to_cpu(p_config->indirection_table[i + 10]), + le16_to_cpu(p_config->indirection_table[i + 11]), + le16_to_cpu(p_config->indirection_table[i + 12]), + le16_to_cpu(p_config->indirection_table[i + 13]), + le16_to_cpu(p_config->indirection_table[i + 14]), + le16_to_cpu(p_config->indirection_table[i + 15])); } for (i = 0; i < 10; i++) - rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]); + p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]); return rc; } @@ -1729,13 +1782,31 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, int 
max_vf_mac_filters = 0; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { - for_each_hwfn(cdev, i) - info->num_queues += - FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); - if (cdev->int_params.fp_msix_cnt) - info->num_queues = - min_t(u8, info->num_queues, - cdev->int_params.fp_msix_cnt); + u16 num_queues = 0; + + /* Since the feature controls only queue-zones, + * make sure we have the contexts [rx, tx, xdp] to + * match. + */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + u16 l2_queues = (u16)FEAT_NUM(hwfn, + QED_PF_L2_QUE); + u16 cids; + + cids = hwfn->pf_params.eth_pf_params.num_cons; + num_queues += min_t(u16, l2_queues, cids / 3); + } + + /* queues might theoretically be >256, but interrupts' + * upper-limit guarantees that it would fit in a u8. + */ + if (cdev->int_params.fp_msix_cnt) { + u8 irqs = cdev->int_params.fp_msix_cnt; + + info->num_queues = (u8)min_t(u16, + num_queues, irqs); + } } else { info->num_queues = cdev->num_hwfns; } @@ -1776,7 +1847,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, qed_fill_dev_info(cdev, &info->common); if (IS_VF(cdev)) - memset(info->common.hw_mac, 0, ETH_ALEN); + eth_zero_addr(info->common.hw_mac); return 0; } @@ -1816,6 +1887,7 @@ static int qed_start_vport(struct qed_dev *cdev, start.drop_ttl0 = params->drop_ttl0; start.opaque_fid = p_hwfn->hw_info.opaque_fid; start.concrete_fid = p_hwfn->hw_info.concrete_fid; + start.handle_ptp_pkts = params->handle_ptp_pkts; start.vport_id = params->vport_id; start.max_buffers_per_cqe = 16; start.mtu = params->mtu; @@ -1857,18 +1929,84 @@ static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) return 0; } +static int qed_update_vport_rss(struct qed_dev *cdev, + struct qed_update_vport_rss_params *input, + struct qed_rss_params *rss) +{ + int i, fn; + + /* Update configuration with what's correct regardless of CMT */ + rss->update_rss_config = 1; + rss->rss_enable = 1; + rss->update_rss_capabilities = 1; + rss->update_rss_ind_table = 1; + rss->update_rss_key = 1; + rss->rss_caps = input->rss_caps; + memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); + + /* In regular scenario, we'd simply need to take input handlers. + * But in CMT, we'd have to split the handlers according to the + * engine they were configured on. We'd then have to understand + * whether RSS is really required, since 2-queues on CMT doesn't + * require RSS.
+ */ + if (cdev->num_hwfns == 1) { + memcpy(rss->rss_ind_table, + input->rss_ind_table, + QED_RSS_IND_TABLE_SIZE * sizeof(void *)); + rss->rss_table_size_log = 7; + return 0; + } + + /* Start by copying the non-spcific information to the 2nd copy */ + memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params)); + + /* CMT should be round-robin */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + struct qed_queue_cid *cid = input->rss_ind_table[i]; + struct qed_rss_params *t_rss; + + if (cid->p_owner == QED_LEADING_HWFN(cdev)) + t_rss = &rss[0]; + else + t_rss = &rss[1]; + + t_rss->rss_ind_table[i / cdev->num_hwfns] = cid; + } + + /* Make sure RSS is actually required */ + for_each_hwfn(cdev, fn) { + for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) { + if (rss[fn].rss_ind_table[i] != + rss[fn].rss_ind_table[0]) + break; + } + if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) { + DP_VERBOSE(cdev, NETIF_MSG_IFUP, + "CMT - 1 queue per-hwfn; Disabling RSS\n"); + return -EINVAL; + } + rss[fn].rss_table_size_log = 6; + } + + return 0; +} + static int qed_update_vport(struct qed_dev *cdev, struct qed_update_vport_params *params) { struct qed_sp_vport_update_params sp_params; - struct qed_rss_params sp_rss_params; - int rc, i; + struct qed_rss_params *rss; + int rc = 0, i; if (!cdev) return -ENODEV; + rss = vzalloc(sizeof(*rss) * cdev->num_hwfns); + if (!rss) + return -ENOMEM; + memset(&sp_params, 0, sizeof(sp_params)); - memset(&sp_rss_params, 0, sizeof(sp_rss_params)); /* Translate protocol params into sp params */ sp_params.vport_id = params->vport_id; @@ -1882,66 +2020,24 @@ static int qed_update_vport(struct qed_dev *cdev, sp_params.update_accept_any_vlan_flg = params->update_accept_any_vlan_flg; - /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. - * We need to re-fix the rss values per engine for CMT. - */ - if (cdev->num_hwfns > 1 && params->update_rss_flg) { - struct qed_update_vport_rss_params *rss = ¶ms->rss_params; - int k, max = 0; - - /* Find largest entry, since it's possible RSS needs to - * be disabled [in case only 1 queue per-hwfn] - */ - for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) - max = (max > rss->rss_ind_table[k]) ? 
- max : rss->rss_ind_table[k]; - - /* Either fix RSS values or disable RSS */ - if (cdev->num_hwfns < max + 1) { - int divisor = (max + cdev->num_hwfns - 1) / - cdev->num_hwfns; - - DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "CMT - fixing RSS values (modulo %02x)\n", - divisor); - - for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) - rss->rss_ind_table[k] = - rss->rss_ind_table[k] % divisor; - } else { - DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "CMT - 1 queue per-hwfn; Disabling RSS\n"); + /* Prepare the RSS configuration */ + if (params->update_rss_flg) + if (qed_update_vport_rss(cdev, ¶ms->rss_params, rss)) params->update_rss_flg = 0; - } - } - - /* Now, update the RSS configuration for actual configuration */ - if (params->update_rss_flg) { - sp_rss_params.update_rss_config = 1; - sp_rss_params.rss_enable = 1; - sp_rss_params.update_rss_capabilities = 1; - sp_rss_params.update_rss_ind_table = 1; - sp_rss_params.update_rss_key = 1; - sp_rss_params.rss_caps = params->rss_params.rss_caps; - sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ - memcpy(sp_rss_params.rss_ind_table, - params->rss_params.rss_ind_table, - QED_RSS_IND_TABLE_SIZE * sizeof(u16)); - memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, - QED_RSS_KEY_SIZE * sizeof(u32)); - sp_params.rss_params = &sp_rss_params; - } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (params->update_rss_flg) + sp_params.rss_params = &rss[i]; + sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; rc = qed_sp_vport_update(p_hwfn, &sp_params, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_ERR(cdev, "Failed to update VPORT\n"); - return rc; + goto out; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), @@ -1950,7 +2046,9 @@ static int qed_update_vport(struct qed_dev *cdev, params->update_vport_active_flg); } - return 0; +out: + vfree(rss); + return rc; } static int qed_start_rxq(struct qed_dev *cdev, @@ -2114,11 +2212,14 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST; - if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; - else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) + accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + } return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, QED_SPQ_MODE_CB, NULL); @@ -2229,6 +2330,8 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass; extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; #endif +extern const struct qed_eth_ptp_ops qed_ptp_ops_pass; + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV @@ -2237,6 +2340,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { #ifdef CONFIG_DCB .dcb = &qed_dcbnl_ops_pass, #endif + .ptp = &qed_ptp_ops_pass, .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, .check_mac = &qed_check_mac, diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 48c9bfc28140..e763abd334f6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic 
Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_L2_H #define _QED_L2_H @@ -15,6 +39,20 @@ #include "qed.h" #include "qed_hw.h" #include "qed_sp.h" +struct qed_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; + + /* Indirection table consist of rx queue handles */ + void *rss_ind_table[QED_RSS_IND_TABLE_SIZE]; + u32 rss_key[QED_RSS_KEY_SIZE]; +}; struct qed_sge_tpa_params { u8 max_buffers_per_cqe; @@ -118,6 +156,7 @@ struct qed_sp_vport_start_params { enum qed_tpa_mode tpa_mode; bool remove_inner_vlan; bool tx_switching; + bool handle_ptp_pkts; bool only_untagged; bool drop_ttl0; u8 max_buffers_per_cqe; @@ -132,18 +171,6 @@ struct qed_sp_vport_start_params { int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); -struct qed_rss_params { - u8 update_rss_config; - u8 rss_enable; - u8 rss_eng_id; - u8 update_rss_capabilities; - u8 update_rss_ind_table; - u8 update_rss_key; - u8 rss_caps; - u8 rss_table_size_log; - u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; - u32 rss_key[QED_RSS_KEY_SIZE]; -}; struct qed_filter_accept_flags { u8 update_rx_mode_config; @@ -263,6 +290,8 @@ struct qed_queue_cid { /* Legacy VFs might have Rx producer located elsewhere */ bool b_legacy_vf; + + struct qed_hwfn *p_owner; }; void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 873ce2cd76ba..9a0b9af10a57 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation * - * Copyright (c) 2015 QLogic Corporation + * This software is available to you under a choice of one of two + * licenses. 
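The qed_l2.h change above is what makes the earlier ramrod code possible: the indirection table now carries opaque queue-cid handles instead of relative u16 queue ids, and each handle already knows its absolute queue id and its owning hwfn. A stripped-down model of the two fields the L2 code relies on (field names taken from the diff, everything else illustrative):

#include <linux/types.h>

struct qed_hwfn;

/* Only the fields qed_sp_vport_update() and the CMT split care about */
struct example_queue_cid {
	struct {
		u16 queue_id;		/* absolute HW queue id for the ramrod */
	} abs;
	struct qed_hwfn *p_owner;	/* engine the queue was opened on */
};

static u16 rss_entry(const struct example_queue_cid *cid)
{
	return cid->abs.queue_id;	/* the ramrod stores it as __le16 */
}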
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> @@ -1107,6 +1130,9 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->qm_pq_id = cpu_to_le16(pq_id); switch (conn_type) { + case QED_LL2_TYPE_FCOE: + p_ramrod->conn_type = PROTOCOLID_FCOE; + break; case QED_LL2_TYPE_ISCSI: case QED_LL2_TYPE_ISCSI_OOO: p_ramrod->conn_type = PROTOCOLID_ISCSI; @@ -1435,6 +1461,15 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8906, 0, + QED_LLH_FILTER_ETHERTYPE); + qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8914, 0, + QED_LLH_FILTER_ETHERTYPE); + } + return rc; } @@ -1808,6 +1843,15 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8906, 0, + QED_LLH_FILTER_ETHERTYPE); + qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8914, 0, + QED_LLH_FILTER_ETHERTYPE); + } + return rc; } @@ -2016,6 +2060,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) } switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { + case QED_PCI_FCOE: + conn_type = QED_LL2_TYPE_FCOE; + gsi_enable = 0; + break; case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; gsi_enable = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 31417928b635..31a409033c41 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation * - * Copyright (c) 2015 QLogic Corporation + * This software is available to you under a choice of one of two + * licenses. 
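The two LLH protocol filters installed in qed_ll2_establish_connection() above and removed again in qed_ll2_terminate_connection() are the standard FCoE EtherTypes; keeping the add/remove calls paired ensures the classifier only diverts these frames while an FCoE LL2 connection actually exists. For readability, the raw literals correspond to the values of ETH_P_FCOE and ETH_P_FIP in <linux/if_ether.h>, though the patch writes the numbers directly:

#define ETH_P_FCOE	0x8906	/* Fibre Channel over Ethernet */
#define ETH_P_FIP	0x8914	/* FCoE Initialization Protocol */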
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_LL2_H @@ -31,7 +54,7 @@ enum qed_ll2_roce_flavor_type { }; enum qed_ll2_conn_type { - QED_LL2_TYPE_RESERVED, + QED_LL2_TYPE_FCOE, QED_LL2_TYPE_ISCSI, QED_LL2_TYPE_TEST, QED_LL2_TYPE_ISCSI_OOO, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index aeb98d8c5626..eef30a598b40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include <linux/stddef.h> @@ -29,9 +53,11 @@ #include "qed_sp.h" #include "qed_dev_api.h" #include "qed_ll2.h" +#include "qed_fcoe.h" #include "qed_mcp.h" #include "qed_hw.h" #include "qed_selftest.h" +#include "qed_debug.h" #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) @@ -853,6 +879,17 @@ static void qed_update_pf_params(struct qed_dev *cdev, params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } + /* In case we might support RDMA, don't allow qede to be greedy + * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. + */ + if (QED_LEADING_HWFN(cdev)->hw_info.personality == + QED_PCI_ETH_ROCE) { + u16 *num_cons; + + num_cons = ¶ms->eth_pf_params.num_cons; + *num_cons = min_t(u16, *num_cons, 192); + } + for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -867,6 +904,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, struct qed_mcp_drv_version drv_version; const u8 *data = NULL; struct qed_hwfn *hwfn; + struct qed_ptt *p_ptt; int rc = -EINVAL; if (qed_iov_wq_start(cdev)) @@ -881,6 +919,14 @@ static int qed_slowpath_start(struct qed_dev *cdev, QED_FW_FILE_NAME); goto err; } + + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (p_ptt) { + QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt; + } else { + DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n"); + goto err; + } } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; @@ -968,6 +1014,10 @@ err: if (IS_PF(cdev)) release_firmware(cdev->firmware); + if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt) + qed_ptt_release(QED_LEADING_HWFN(cdev), + QED_LEADING_HWFN(cdev)->p_ptp_ptt); + qed_iov_wq_stop(cdev, false); return rc; @@ -981,6 +1031,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { + qed_ptt_release(QED_LEADING_HWFN(cdev), + QED_LEADING_HWFN(cdev)->p_ptp_ptt); qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); @@ -1020,6 +1072,7 @@ static u32 qed_sb_init(struct qed_dev *cdev, enum qed_sb_type type) { struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; int hwfn_index; u16 rel_sb_id; u8 n_hwfns; @@ -1041,8 +1094,18 @@ static u32 qed_sb_init(struct qed_dev *cdev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", hwfn_index, rel_sb_id, sb_id); - rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, - sb_virt_addr, sb_phy_addr, rel_sb_id); + if (IS_PF(p_hwfn->cdev)) { + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, + sb_phy_addr, rel_sb_id); + qed_ptt_release(p_hwfn, p_ptt); + } else { + rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, + sb_phy_addr, rel_sb_id); + } return rc; } @@ -1083,12 +1146,18 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) if (!cdev) return -ENODEV; - if (IS_VF(cdev)) - return 0; - /* The link should be set only once per PF */ hwfn = &cdev->hwfns[0]; + /* When VF wants to set link, force it to read the bulletin instead. + * This mimics the PF behavior, where a noitification [both immediate + * and possible later] would be generated when changing properties. 
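A condensed view of the PTT lifecycle the qed_main.c hunks above introduce for PTP: one PTT window is acquired at slowpath start, stashed in the leading hwfn as p_ptp_ptt, and released both on the start error path and in qed_slowpath_stop(). The sketch below assumes the qed helpers used in this patch; it shows the shape of the pattern, not the driver code itself:

static int ptp_ptt_start(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt)
		return -EBUSY;		/* no free PTT window */

	hwfn->p_ptp_ptt = ptt;		/* held until slowpath stop */
	return 0;
}

static void ptp_ptt_stop(struct qed_hwfn *hwfn)
{
	qed_ptt_release(hwfn, hwfn->p_ptp_ptt);
}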
+ */ + if (IS_VF(cdev)) { + qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); + return 0; + } + ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EBUSY; @@ -1553,6 +1622,8 @@ const struct qed_common_ops qed_common_ops_pass = { .sb_release = &qed_sb_release, .simd_handler_config = &qed_simd_handler_config, .simd_handler_clean = &qed_simd_handler_clean, + .dbg_grc = &qed_dbg_grc, + .dbg_grc_size = &qed_dbg_grc_size, .can_link_change = &qed_can_link_change, .set_link = &qed_set_link, .get_link = &qed_get_current_link, @@ -1586,6 +1657,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev, stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; stats->lan_stats.fcs_err = -1; break; + case QED_MCP_FCOE_STATS: + qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); + break; default: DP_ERR(cdev, "Invalid protocol type = %d\n", type); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6dd3ce443484..87fde205149f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include <linux/types.h> @@ -168,6 +192,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Initialize the MFW spinlock */ spin_lock_init(&p_info->lock); + spin_lock_init(&p_info->link_lock); return 0; @@ -586,6 +611,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, u8 max_bw, min_bw; u32 status = 0; + /* Prevent SW/attentions from doing this at the same time */ + spin_lock_bh(&p_hwfn->mcp_info->link_lock); + p_link = &p_hwfn->mcp_info->link_output; memset(p_link, 0, sizeof(*p_link)); if (!b_reset) { @@ -600,7 +628,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link indications\n"); - return; + goto out; } if (p_hwfn->b_drv_link_init) @@ -651,7 +679,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, /* Min bandwidth configuration */ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); - qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate); + qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt, + p_link->min_pf_rate); p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); p_link->an_complete = !!(status & @@ -707,6 +736,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); qed_link_update(p_hwfn); +out: + spin_unlock_bh(&p_hwfn->mcp_info->link_lock); } int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) @@ -756,9 +787,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return rc; } - /* Reset the link status if needed */ - if (!b_up) - qed_mcp_handle_link_change(p_hwfn, p_ptt, true); + /* Mimic link-change attention, done for several reasons: + * - On reset, there's no guarantee MFW would trigger + * an attention. + * - On initialization, older MFWs might not indicate link change + * during LFA, so we'll never get an UP indication. + */ + qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); return 0; } @@ -1098,12 +1133,17 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: - if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) + if (!IS_ENABLED(CONFIG_QED_RDMA)) + *p_proto = QED_PCI_ETH; + else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); break; case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; + case FUNC_MF_CFG_PROTOCOL_FCOE: + *p_proto = QED_PCI_FCOE; + break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); /* Fallthrough */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 407a2c1830fb..368e88de146c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
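The new link_lock in the qed_mcp.c hunks above exists because qed_mcp_handle_link_change() is now reachable from two directions: the MFW attention path (bottom-half context) and qed_mcp_set_link(), which deliberately mimics the attention so that resets and LFA cases still publish a link state. Taking the lock with spin_lock_bh() on the process-context side keeps the two callers from racing, or deadlocking on one CPU. A minimal shape of that pattern, not the driver's function:

static void publish_link_change(struct qed_mcp_info *info, bool b_reset)
{
	spin_lock_bh(&info->link_lock);

	if (b_reset)
		goto out;		/* only clear the cached state */

	/* ... read shmem, recompute bandwidth, call qed_link_update() ... */
out:
	spin_unlock_bh(&info->link_lock);
}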
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_MCP_H @@ -13,6 +37,7 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/qed/qed_fcoe_if.h> #include "qed_hsi.h" struct qed_mcp_link_speed_params { @@ -460,7 +485,13 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ ((_p_hwfn)->cdev->num_ports_in_engines * 2)) struct qed_mcp_info { + /* Spinlock used for protecting the access to the MFW mailbox */ spinlock_t lock; + + /* Spinlock used for syncing SW link-changes and link-changes + * originating from attention context. + */ + spinlock_t link_lock; bool block_mb_sending; u32 public_base; u32 drv_mb_addr; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index 155abcb507fd..7d731c6cb892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 7a0670a9a074..4f138fb5f533 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_OOO_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c new file mode 100644 index 000000000000..d27aa85da23c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -0,0 +1,323 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/types.h> +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_hw.h" +#include "qed_l2.h" +#include "qed_ptp.h" +#include "qed_reg_addr.h" + +/* 16 nano second time quantas to wait before making a Drift adjustment */ +#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0 +/* Nano seconds to add/subtract when making a Drift adjustment */ +#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28 +/* Add/subtract the Adjustment_Value when making a Drift adjustment */ +#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 +#define QED_TIMESTAMP_MASK BIT(16) + +/* Read Rx timestamp */ +static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 val; + + *timestamp = 0; + val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID); + if (!(val & QED_TIMESTAMP_MASK)) { + DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val); + return -EINVAL; + } + + val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB); + *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB); + *timestamp <<= 32; + *timestamp |= val; + + /* Reset timestamp register to allow new timestamp */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + + return 0; +} + +/* Read Tx timestamp */ +static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 val; + + *timestamp = 0; + val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID); + if (!(val & QED_TIMESTAMP_MASK)) { + DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val); + return -EINVAL; + } + + val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB); + *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB); + *timestamp <<= 32; + *timestamp |= val; + + /* Reset timestamp register to allow new timestamp */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); + + return 0; +} + +/* Read Phy Hardware Clock */ +static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 temp = 0; + + temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB); + *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB); + *phc_cycles <<= 32; + *phc_cycles |= temp; + + return 0; +} + +/* Filter PTP protocol packets that need to be timestamped */ +static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev, + enum qed_ptp_filter_type type) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 rule_mask, parm_mask; + + switch (type) { + case QED_PTP_FILTER_L2_IPV4_IPV6: + parm_mask = 0x6AA; + rule_mask = 0x3EEE; + break; + case QED_PTP_FILTER_L2: + parm_mask = 0x6BF; + rule_mask = 0x3EFF; + break; + case QED_PTP_FILTER_IPV4_IPV6: + parm_mask = 0x7EA; + rule_mask = 0x3FFE; + break; + case QED_PTP_FILTER_IPV4: + parm_mask = 
0x7EE; + rule_mask = 0x3FFE; + break; + default: + DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type); + return -EINVAL; + } + + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask); + + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1); + + /* Reset possibly old timestamps */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + + return 0; +} + +/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units. + * FW/HW accepts the adjustment value in terms of 3 parameters: + * Drift period - adjustment happens once in certain number of nano seconds. + * Drift value - time is adjusted by a certain value, for example by 5 ns. + * Drift direction - add or subtract the adjustment value. + * The routine translates ppb into the adjustment triplet in an optimal manner. + */ +static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb) +{ + s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 drift_ctr_cfg = 0, drift_state; + int drift_dir = 1; + + if (ppb < 0) { + ppb = -ppb; + drift_dir = 0; + } + + if (ppb > 1) { + s64 best_dif = ppb, best_approx_dev = 1; + + /* Adjustment value is up to +/-7ns, find an optimal value in + * this range. + */ + for (val = 7; val > 0; val--) { + period = div_s64(val * 1000000000, ppb); + period -= 8; + period >>= 4; + if (period < 1) + period = 1; + if (period > 0xFFFFFFE) + period = 0xFFFFFFE; + + /* Check both rounding ends for approximate error */ + approx_dev = period * 16 + 8; + dif = ppb * approx_dev - val * 1000000000; + dif2 = dif + 16 * ppb; + + if (dif < 0) + dif = -dif; + if (dif2 < 0) + dif2 = -dif2; + + /* Determine which end gives better approximation */ + if (dif * (approx_dev + 16) > dif2 * approx_dev) { + period++; + approx_dev += 16; + dif = dif2; + } + + /* Track best approximation found so far */ + if (best_dif * approx_dev > dif * best_approx_dev) { + best_dif = dif; + best_val = val; + best_period = period; + best_approx_dev = approx_dev; + } + } + } else if (ppb == 1) { + /* This is a special case as its the only value which wouldn't + * fit in a s64 variable. In order to prevent castings simple + * handle it seperately. 
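The search loop above is easier to check with one concrete case. For ppb = 100 and the candidate val = 7 ns, the ideal interval is 7e9 / 100 = 70,000,000 ns; encoded in 16 ns quanta that becomes period = (70,000,000 - 8) >> 4 = 4,374,999, which the driver models as 4,374,999 * 16 + 8 = 69,999,992 ns between 7 ns adjustments, i.e. roughly 100.00001 ppb of correction. A standalone recomputation of that estimate (same arithmetic, not the driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ppb = 100, val = 7;	/* ns added per adjustment */
	int64_t period = (val * 1000000000 / ppb - 8) >> 4;
	int64_t interval_ns = period * 16 + 8;	/* driver's estimate */

	printf("period=%lld quanta, %lld ns/adjustment, ~%.6f ppb\n",
	       (long long)period, (long long)interval_ns,
	       (double)val * 1e9 / (double)interval_ns);
	return 0;
}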
+ */ + best_val = 4; + best_period = 0xee6b27f; + } else { + best_val = 0; + best_period = 0xFFFFFFF; + } + + drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) | + (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) | + (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1); + + drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR); + if (drift_state & 1) { + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, + drift_ctr_cfg); + } else { + DP_INFO(p_hwfn, "Drift counter is not reset\n"); + return -EINVAL; + } + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); + + return 0; +} + +static int qed_ptp_hw_enable(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + + /* Reset PTP event detection rules - will be configured in the IOCTL */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7); + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1); + + /* Pause free running counter */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0); + /* Resume free running counter */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4); + + /* Disable drift register */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); + + /* Reset possibly old timestamps */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); + + return 0; +} + +static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE); + + return 0; +} + +static int qed_ptp_hw_disable(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + + /* Reset PTP event detection rules */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable the PTP feature */ + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); + + return 0; +} + +const struct qed_eth_ptp_ops qed_ptp_ops_pass = { + .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on, + .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters, + .read_rx_ts = qed_ptp_hw_read_rx_ts, + .read_tx_ts = qed_ptp_hw_read_tx_ts, + .read_cc = qed_ptp_hw_read_cc, + .adjfreq = qed_ptp_hw_adjfreq, + .disable = qed_ptp_hw_disable, + .enable = qed_ptp_hw_enable, +}; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h new file mode 100644 index 000000000000..63c666d0b739 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.h @@ -0,0 +1,47 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic 
Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QED_PTP_H +#define _QED_PTP_H +#include <linux/types.h> + +int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_ptp_filter_type type); +int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); +int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); +int qed_ptp_read_cc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u64 *cycles); +int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb); +int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 97544205a8c1..d59d9df60cd2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef REG_ADDR_H @@ -86,6 +110,8 @@ 0x1e80000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE \ + 0x1f0164UL #define PRS_REG_SEARCH_TCP \ 0x1f0400UL #define PRS_REG_SEARCH_UDP \ @@ -96,6 +122,12 @@ 0x1f040cUL #define PRS_REG_SEARCH_OPENFLOW \ 0x1f0434UL +#define PRS_REG_SEARCH_TAG1 \ + 0x1f0444UL +#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \ + 0x1f0a0cUL +#define PRS_REG_SEARCH_TCP_FIRST_FRAG \ + 0x1f0410UL #define TM_REG_PF_ENABLE_CONN \ 0x2c043cUL #define TM_REG_PF_ENABLE_TASK \ @@ -1457,4 +1489,35 @@ #define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL #define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL #define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL +#define NIG_REG_RX_PTP_EN 0x501900UL +#define NIG_REG_TX_PTP_EN 0x501904UL +#define NIG_REG_LLH_PTP_TO_HOST 0x501908UL +#define NIG_REG_LLH_PTP_TO_MCP 0x50190cUL +#define NIG_REG_PTP_SW_TXTSEN 0x501910UL +#define NIG_REG_LLH_PTP_ETHERTYPE_1 0x501914UL +#define NIG_REG_LLH_PTP_MAC_DA_2_LSB 0x501918UL +#define NIG_REG_LLH_PTP_MAC_DA_2_MSB 0x50191cUL +#define NIG_REG_LLH_PTP_PARAM_MASK 0x501920UL +#define NIG_REG_LLH_PTP_RULE_MASK 0x501924UL +#define NIG_REG_TX_LLH_PTP_PARAM_MASK 0x501928UL +#define NIG_REG_TX_LLH_PTP_RULE_MASK 0x50192cUL +#define NIG_REG_LLH_PTP_HOST_BUF_SEQID 0x501930UL +#define NIG_REG_LLH_PTP_HOST_BUF_TS_LSB 0x501934UL +#define NIG_REG_LLH_PTP_HOST_BUF_TS_MSB 0x501938UL +#define NIG_REG_LLH_PTP_MCP_BUF_SEQID 0x50193cUL +#define NIG_REG_LLH_PTP_MCP_BUF_TS_LSB 0x501940UL +#define NIG_REG_LLH_PTP_MCP_BUF_TS_MSB 0x501944UL +#define NIG_REG_TX_LLH_PTP_BUF_SEQID 0x501948UL +#define NIG_REG_TX_LLH_PTP_BUF_TS_LSB 0x50194cUL +#define NIG_REG_TX_LLH_PTP_BUF_TS_MSB 0x501950UL +#define NIG_REG_RX_PTP_TS_MSB_ERR 0x501954UL +#define NIG_REG_TX_PTP_TS_MSB_ERR 0x501958UL +#define NIG_REG_TSGEN_SYNC_TIME_LSB 0x5088c0UL +#define NIG_REG_TSGEN_SYNC_TIME_MSB 0x5088c4UL +#define NIG_REG_TSGEN_RST_DRIFT_CNTR 0x5088d8UL +#define NIG_REG_TSGEN_DRIFT_CNTR_CONF 0x5088dcUL +#define NIG_REG_TS_OUTPUT_ENABLE_PDA 0x508870UL +#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL +#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL +#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 2dbdb3298991..d9ff6b28591c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. 
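The NIG register block listed above is what the new qed_ptp.c drives. The Rx and Tx timestamp reads share one shape: a valid bit (QED_TIMESTAMP_MASK, bit 16) in the *_BUF_SEQID register gates the read, the 64-bit value is assembled from the MSB/LSB pair, and writing the mask back re-arms the buffer for the next captured frame. A hedged consolidation of that pattern using the helpers from this patch:

static int read_hw_timestamp(struct qed_hwfn *hwfn, struct qed_ptt *ptt,
			     u32 seqid_reg, u32 lsb_reg, u32 msb_reg,
			     u64 *ts)
{
	u32 lsb, seq = qed_rd(hwfn, ptt, seqid_reg);

	if (!(seq & QED_TIMESTAMP_MASK))
		return -EINVAL;			/* nothing captured yet */

	lsb = qed_rd(hwfn, ptt, lsb_reg);
	*ts = ((u64)qed_rd(hwfn, ptt, msb_reg) << 32) | lsb;

	/* Writing the mask back frees the buffer for the next timestamp */
	qed_wr(hwfn, ptt, seqid_reg, QED_TIMESTAMP_MASK);
	return 0;
}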
You may choose to be licensed under the terms of the GNU @@ -948,7 +948,9 @@ static int qed_rdma_create_cq(void *rdma_cxt, err: /* release allocated icid */ + spin_lock_bh(&p_info->lock); qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); + spin_unlock_bh(&p_info->lock); DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); return rc; @@ -1766,13 +1768,13 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, if (rc) goto err_resp; - dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), - p_resp_ramrod_res, resp_ramrod_res_phys); - out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn); rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag), ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), + p_resp_ramrod_res, resp_ramrod_res_phys); + if (!(qp->req_offloaded)) { /* Don't send query qp for the requester */ out_params->sq_psn = qp->sq_psn; @@ -1813,9 +1815,6 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, if (rc) goto err_req; - dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), - p_req_ramrod_res, req_ramrod_res_phys); - out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn); sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG); @@ -1823,6 +1822,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), + p_req_ramrod_res, req_ramrod_res_phys); + out_params->draining = false; if (rq_err_state) @@ -1847,6 +1849,7 @@ err_resp: static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 num_invalidated_mw = 0; u32 num_bound_mw = 0; u32 start_cid; @@ -1861,35 +1864,39 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return -EINVAL; } - rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw); - if (rc) - return rc; + if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { + rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, + &num_invalidated_mw); + if (rc) + return rc; - /* Send destroy requester ramrod */ - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw); - if (rc) - return rc; + /* Send destroy requester ramrod */ + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, + &num_bound_mw); + if (rc) + return rc; - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } + if (num_invalidated_mw != num_bound_mw) { + DP_NOTICE(p_hwfn, + "number of invalidate memory windows is different from bounded ones\n"); + return -EINVAL; + } - spin_lock_bh(&p_hwfn->p_rdma_info->lock); + spin_lock_bh(&p_rdma_info->lock); - start_cid = qed_cxt_get_proto_cid_start(p_hwfn, - p_hwfn->p_rdma_info->proto); + start_cid = qed_cxt_get_proto_cid_start(p_hwfn, + p_rdma_info->proto); - /* Release responder's icid */ - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, - qp->icid - start_cid); + /* Release responder's icid */ + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, + qp->icid - start_cid); - /* Release requester's icid */ - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, - qp->icid + 1 - start_cid); + /* Release requester's icid */ + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, + qp->icid + 1 - 
start_cid); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + spin_unlock_bh(&p_rdma_info->lock); + } return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 279f342af8db..36cf4b2ab7fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index 48bfaecaf6dc..1bafc05db2b8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,3 +1,35 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + #include <linux/crc32.h> #include "qed.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 9c897bc68d05..30393ffaa8e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
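In the reworked qed_roce_destroy_qp() above, the destroy ramrods are only sent for QPs that ever left the RESET state, and the two icids are then returned together: a QP's responder and requester occupy adjacent icids, both released relative to the protocol's first cid and under the same rdma_info lock that handed them out. The arithmetic only, with an illustrative bitmap release standing in for the driver's qed_bmap helper:

static void release_qp_icids(unsigned long *cid_map, u32 icid, u32 start_cid)
{
	clear_bit(icid - start_cid, cid_map);		/* responder */
	clear_bit(icid + 1 - start_cid, cid_map);	/* requester */
}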
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_SP_H @@ -85,6 +109,10 @@ union ramrod_data { struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq; struct roce_init_func_ramrod_data roce_init_func; + struct fcoe_init_ramrod_params fcoe_init; + struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld; + struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate; + struct fcoe_stat_ramrod_params fcoe_stat; struct iscsi_slow_path_hdr iscsi_empty; struct iscsi_init_ramrod_params iscsi_init; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index a39ef2e7a9a6..6fb80f9ef446 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include <linux/types.h> @@ -362,6 +386,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, case QED_PCI_ETH: p_ramrod->personality = PERSONALITY_ETH; break; + case QED_PCI_FCOE: + p_ramrod->personality = PERSONALITY_FCOE; + break; case QED_PCI_ISCSI: p_ramrod->personality = PERSONALITY_ISCSI; break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index f022469bdcf8..645328a9f0cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 85b09dd1787a..253c2bbe1e4e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1,13 +1,38 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/etherdevice.h> #include <linux/crc32.h> +#include <linux/vmalloc.h> #include <linux/qed/qed_iov_if.h> #include "qed_cxt.h" #include "qed_hsi.h" @@ -806,10 +831,52 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, vf->num_sbs = 0; } +static void qed_iov_set_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *params, + struct qed_mcp_link_state *link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iov_vf_init_params *p_params) { + struct qed_mcp_link_capabilities link_caps; + struct qed_mcp_link_params link_params; + struct qed_mcp_link_state link_state; u8 num_of_vf_avaiable_chains = 0; struct qed_vf_info *vf = NULL; u16 qid, num_irqs; @@ -898,6 +965,15 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, p_queue->fw_tx_qid, p_queue->fw_cid); } + /* Update the link configuration in bulletin */ + memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), + sizeof(link_params)); + memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); + memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), + sizeof(link_caps)); + qed_iov_set_link(p_hwfn, p_params->rel_vf_id, + &link_params, &link_state, &link_caps); + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); if (!rc) { vf->b_init = true; @@ -909,45 +985,6 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, return rc; } -static void qed_iov_set_link(struct qed_hwfn *p_hwfn, - u16 vfid, - struct qed_mcp_link_params 
*params, - struct qed_mcp_link_state *link, - struct qed_mcp_link_capabilities *p_caps) -{ - struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, - vfid, - false); - struct qed_bulletin_content *p_bulletin; - - if (!p_vf) - return; - - p_bulletin = p_vf->bulletin.p_virt; - p_bulletin->req_autoneg = params->speed.autoneg; - p_bulletin->req_adv_speed = params->speed.advertised_speeds; - p_bulletin->req_forced_speed = params->speed.forced_speed; - p_bulletin->req_autoneg_pause = params->pause.autoneg; - p_bulletin->req_forced_rx = params->pause.forced_rx; - p_bulletin->req_forced_tx = params->pause.forced_tx; - p_bulletin->req_loopback = params->loopback_mode; - - p_bulletin->link_up = link->link_up; - p_bulletin->speed = link->speed; - p_bulletin->full_duplex = link->full_duplex; - p_bulletin->autoneg = link->an; - p_bulletin->autoneg_complete = link->an_complete; - p_bulletin->parallel_detection = link->parallel_detection; - p_bulletin->pfc_enabled = link->pfc_enabled; - p_bulletin->partner_adv_speed = link->partner_adv_speed; - p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; - p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; - p_bulletin->partner_adv_pause = link->partner_adv_pause; - p_bulletin->sfp_tx_fault = link->sfp_tx_fault; - - p_bulletin->capability_speed = p_caps->speed_capabilities; -} - static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rel_vf_id) { @@ -1199,7 +1236,10 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) return; /* Clear the VF mac */ - memset(vf_info->mac, 0, ETH_ALEN); + eth_zero_addr(vf_info->mac); + + vf_info->rx_accept_mode = 0; + vf_info->tx_accept_mode = 0; } static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, @@ -2294,12 +2334,14 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, struct qed_sp_vport_update_params *p_data, struct qed_rss_params *p_rss, - struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) + struct qed_iov_vf_mbx *p_mbx, + u16 *tlvs_mask, u16 *tlvs_accepted) { struct vfpf_vport_update_rss_tlv *p_rss_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; - u16 i, q_idx, max_q_idx; + bool b_reject = false; u16 table_size; + u16 i, q_idx; p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); @@ -2323,34 +2365,39 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, p_rss->rss_eng_id = vf->relative_vf_id + 1; p_rss->rss_caps = p_rss_tlv->rss_caps; p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; - memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table, - sizeof(p_rss->rss_ind_table)); memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), (1 << p_rss_tlv->rss_table_size_log)); - max_q_idx = ARRAY_SIZE(vf->vf_queues); - for (i = 0; i < table_size; i++) { - u16 index = vf->vf_queues[0].fw_rx_qid; + q_idx = p_rss_tlv->rss_ind_table[i]; + if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Omitting RSS due to wrong queue %04x\n", + vf->relative_vf_id, q_idx); + b_reject = true; + goto out; + } - q_idx = p_rss->rss_ind_table[i]; - if (q_idx >= max_q_idx) - DP_NOTICE(p_hwfn, - "rss_ind_table[%d] = %d, rxq is out of range\n", - i, q_idx); - else if (!vf->vf_queues[q_idx].p_rx_cid) - DP_NOTICE(p_hwfn, - "rss_ind_table[%d] = %d, rxq is not active\n", - i, q_idx); - else - index = vf->vf_queues[q_idx].fw_rx_qid; - p_rss->rss_ind_table[i] = index; + if 
(!vf->vf_queues[q_idx].p_rx_cid) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Omitting RSS due to inactive queue %08x\n", + vf->relative_vf_id, q_idx); + b_reject = true; + goto out; + } + + p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid; } p_data->rss_params = p_rss; +out: *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; + if (!b_reject) + *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; } static void @@ -2401,16 +2448,49 @@ qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; } +static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, + u8 vfid, + struct qed_sp_vport_update_params *params, + u16 *tlvs) +{ + u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; + struct qed_filter_accept_flags *flags = &params->accept_flags; + struct qed_public_vf_info *vf_info; + + /* Untrusted VFs can't even be trusted to know that fact. + * Simply indicate everything is configured fine, and trace + * configuration 'behind their back'. + */ + if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) + return 0; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (flags->update_rx_mode_config) { + vf_info->rx_accept_mode = flags->rx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->rx_accept_filter &= ~mask; + } + + if (flags->update_tx_mode_config) { + vf_info->tx_accept_mode = flags->tx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->tx_accept_filter &= ~mask; + } + + return 0; +} + static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { + struct qed_rss_params *p_rss_params = NULL; struct qed_sp_vport_update_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct qed_sge_tpa_params sge_tpa_params; - struct qed_rss_params rss_params; + u16 tlvs_mask = 0, tlvs_accepted = 0; u8 status = PFVF_STATUS_SUCCESS; - u16 tlvs_mask = 0; u16 length; int rc; @@ -2423,6 +2503,11 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; goto out; } + p_rss_params = vzalloc(sizeof(*p_rss_params)); + if (p_rss_params == NULL) { + status = PFVF_STATUS_FAILURE; + goto out; + } memset(&params, 0, sizeof(params)); params.opaque_fid = vf->opaque_fid; @@ -2437,20 +2522,33 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask); - qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params, - mbx, &tlvs_mask); qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params, &sge_tpa_params, mbx, &tlvs_mask); - /* Just log a message if there is no single extended tlv in buffer. - * When all features of vport update ramrod would be requested by VF - * as extended TLVs in buffer then an error can be returned in response - * if there is no extended TLV present in buffer. + tlvs_accepted = tlvs_mask; + + /* Some of the extended TLVs need to be validated first; In that case, + * they can update the mask without updating the accepted [so that + * PF could communicate to VF it has rejected request]. 
*/ - if (!tlvs_mask) { - DP_NOTICE(p_hwfn, - "No feature tlvs found for vport update\n"); + qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params, + mbx, &tlvs_mask, &tlvs_accepted); + + if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, + &params, &tlvs_accepted)) { + tlvs_accepted = 0; + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + if (!tlvs_accepted) { + if (tlvs_mask) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Upper-layer prevents VF vport configuration\n"); + else + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No feature tlvs found for vport update\n"); status = PFVF_STATUS_NOT_SUPPORTED; goto out; } @@ -2461,8 +2559,9 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; out: + vfree(p_rss_params); length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, - tlvs_mask, tlvs_mask); + tlvs_mask, tlvs_accepted); qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } @@ -2539,8 +2638,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(p_vf->shadow_config.macs[i], p_params->mac)) { - memset(p_vf->shadow_config.macs[i], 0, - ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); break; } } @@ -2553,7 +2651,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, } else if (p_params->opcode == QED_FILTER_REPLACE || p_params->opcode == QED_FILTER_FLUSH) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) - memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); } /* List the new MAC address */ @@ -2916,8 +3014,7 @@ cleanup: ack_vfs[vfid / 32] |= BIT((vfid % 32)); p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64)); - p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= - ~(1ULL << (rel_vf_id % 64)); + p_vf->vf_mbx.b_pending_msg = false; } return rc; @@ -3030,11 +3127,20 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, mbx = &p_vf->vf_mbx; /* qed_iov_process_mbx_request */ - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id); + if (!mbx->b_pending_msg) { + DP_NOTICE(p_hwfn, + "VF[%02x]: Trying to process mailbox message when none is pending\n", + p_vf->abs_vf_id); + return; + } + mbx->b_pending_msg = false; mbx->first_tlv = mbx->req_virt->first_tlv; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Processing mailbox message [type %04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + /* check if tlv type is known */ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && !p_vf->b_malicious) { @@ -3121,20 +3227,19 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, } } -static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid) +void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) { - u64 add_bit = 1ULL << (vfid % 64); + int i; - p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; -} + memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); -static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, - u64 *events) -{ - u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; + qed_for_each_vf(p_hwfn, i) { + struct qed_vf_info *p_vf; - memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH); - memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); + p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; + if (p_vf->vf_mbx.b_pending_msg) + events[i / 64] |= 1ULL << (i % 64); + } } static struct qed_vf_info 
*qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, @@ -3168,7 +3273,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; /* Mark the event and schedule the workqueue */ - qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id); + p_vf->vf_mbx.b_pending_msg = true; qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); return 0; @@ -3892,6 +3997,32 @@ static int qed_set_vf_rate(struct qed_dev *cdev, return 0; } +static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + if (!qed_iov_pf_sanity_check(hwfn, vfid)) { + DP_NOTICE(hwfn, + "SR-IOV sanity check failed, can't set trust\n"); + return -EINVAL; + } + + vf = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (vf->is_trusted_request == trust) + return 0; + vf->is_trusted_request = trust; + + qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); + } + + return 0; +} + static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; @@ -3906,7 +4037,7 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn) return; } - qed_iov_pf_get_and_clear_pending_events(hwfn, events); + qed_iov_pf_get_pending_events(hwfn, events); DP_VERBOSE(hwfn, QED_MSG_IOV, "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", @@ -3996,6 +4127,61 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) +{ + struct qed_sp_vport_update_params params; + struct qed_filter_accept_flags *flags; + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 mask; + int i; + + mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; + flags = &params.accept_flags; + + qed_for_each_vf(hwfn, i) { + /* Need to make sure current requested configuration didn't + * flip so that we'll end up configuring something that's not + * needed. 
+ */ + vf_info = qed_iov_get_public_vf_info(hwfn, i, true); + if (vf_info->is_trusted_configured == + vf_info->is_trusted_request) + continue; + vf_info->is_trusted_configured = vf_info->is_trusted_request; + + /* Validate that the VF has a configured vport */ + vf = qed_iov_get_vf_info(hwfn, i, true); + if (!vf->vport_instance) + continue; + + memset(&params, 0, sizeof(params)); + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + + if (vf_info->rx_accept_mode & mask) { + flags->update_rx_mode_config = 1; + flags->rx_accept_filter = vf_info->rx_accept_mode; + } + + if (vf_info->tx_accept_mode & mask) { + flags->update_tx_mode_config = 1; + flags->tx_accept_filter = vf_info->tx_accept_mode; + } + + /* Remove if needed; Otherwise this would set the mask */ + if (!vf_info->is_trusted_configured) { + flags->rx_accept_filter &= ~mask; + flags->tx_accept_filter &= ~mask; + } + + if (flags->update_rx_mode_config || + flags->update_tx_mode_config) + qed_sp_vport_update(hwfn, &params, + QED_SPQ_MODE_EBLOCK, NULL); + } +} + static void qed_iov_pf_task(struct work_struct *work) { @@ -4031,6 +4217,9 @@ static void qed_iov_pf_task(struct work_struct *work) if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, &hwfn->iov_task_flags)) qed_handle_bulletin_post(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) + qed_iov_handle_trust_change(hwfn); } void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) @@ -4093,4 +4282,5 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = { .set_link_state = &qed_set_vf_link_state, .set_spoof = &qed_spoof_configure, .set_rate = &qed_set_vf_rate, + .set_trust = &qed_set_vf_trust, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 509c02b4772e..a89605821522 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_SRIOV_H @@ -56,6 +80,14 @@ struct qed_public_vf_info { /* Currently configured Tx rate in MB/sec. 0 if unconfigured */ int tx_rate; + + /* Trusted VFs can configure promiscuous mode. + * Also store shadow promisc configuration if needed. + */ + bool is_trusted_configured; + bool is_trusted_request; + u8 rx_accept_mode; + u8 tx_accept_mode; }; struct qed_iov_vf_init_params { @@ -108,6 +140,9 @@ struct qed_iov_vf_mbx { /* Address in VF where a pending message is located */ dma_addr_t pending_req; + /* Message from VF awaits handling */ + bool b_pending_msg; + u8 *offset; /* saved VF request header */ @@ -200,7 +235,6 @@ struct qed_vf_info { */ struct qed_pf_iov { struct qed_vf_info vfs_array[MAX_NUM_VFS]; - u64 pending_events[QED_VF_ARRAY_LENGTH]; u64 pending_flr[QED_VF_ARRAY_LENGTH]; /* Allocate message address continuosuly and split to each VF */ @@ -221,6 +255,8 @@ enum qed_iov_wq_flag { QED_IOV_WQ_BULLETIN_UPDATE_FLAG, QED_IOV_WQ_STOP_WQ_FLAG, QED_IOV_WQ_FLR_FLAG, + QED_IOV_WQ_TRUST_FLAG, + QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, }; #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 60b31a8ede73..15d2855ec563 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include <linux/crc32.h> @@ -814,6 +838,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, if (p_params->rss_params) { struct qed_rss_params *rss_params = p_params->rss_params; struct vfpf_vport_update_rss_tlv *p_rss_tlv; + int i, table_size; size = sizeof(struct vfpf_vport_update_rss_tlv); p_rss_tlv = qed_add_tlv(p_hwfn, @@ -836,8 +861,15 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, p_rss_tlv->rss_enable = rss_params->rss_enable; p_rss_tlv->rss_caps = rss_params->rss_caps; p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; - memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table, - sizeof(rss_params->rss_ind_table)); + + table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE, + 1 << p_rss_tlv->rss_table_size_log); + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_queue; + + p_queue = rss_params->rss_ind_table[i]; + p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; + } memcpy(p_rss_tlv->rss_key, rss_params->rss_key, sizeof(rss_params->rss_key)); } @@ -1253,6 +1285,9 @@ void qed_iov_vf_task(struct work_struct *work) /* Handle bulletin board changes */ qed_vf_read_bulletin(hwfn, &change); + if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, + &hwfn->iov_task_flags)) + change = 1; if (change) qed_handle_bulletin_change(hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 11eb3854e6f2..7da0b165d8bc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_VF_H diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 048a230c3ce0..bc5f7c3b277d 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QEDE) := qede.o -qede-y := qede_main.o qede_ethtool.o +qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o qede-$(CONFIG_DCB) += qede_dcbnl.o qede-$(CONFIG_QED_RDMA) += qede_roce.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index c79dc78746fc..f2aaef2cfb86 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ #ifndef _QEDE_H_ #define _QEDE_H_ #include <linux/compiler.h> @@ -26,7 +49,7 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 9 +#define QEDE_REVISION_VERSION 10 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." 
\ @@ -114,6 +137,8 @@ struct qede_rdma_dev { struct workqueue_struct *roce_wq; }; +struct qede_ptp; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -125,8 +150,10 @@ struct qede_dev { u32 flags; #define QEDE_FLAG_IS_VF BIT(0) #define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF)) +#define QEDE_TX_TIMESTAMPING_EN BIT(1) const struct qed_eth_ops *ops; + struct qede_ptp *ptp; struct qed_dev_eth_info dev_info; #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) @@ -141,6 +168,7 @@ struct qede_dev { u16 num_queues; #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) +#define QEDE_RX_QUEUE_IDX(edev, i) (i) #define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx) struct qed_int_info int_info; @@ -171,7 +199,10 @@ struct qede_dev { #define QEDE_RSS_KEY_INITED BIT(1) #define QEDE_RSS_CAPS_INITED BIT(2) u32 rss_params_inited; /* bit-field to track initialized rss params */ - struct qed_update_vport_rss_params rss_params; + u16 rss_ind_table[128]; + u32 rss_key[10]; + u8 rss_caps; + u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ @@ -257,7 +288,7 @@ struct qede_rx_queue { u16 sw_rx_cons; u16 sw_rx_prod; - u16 num_rx_buffers; /* Slowpath */ + u16 filled_buffers; u8 data_direction; u8 rxq_id; @@ -270,6 +301,9 @@ struct qede_rx_queue { struct qed_chain rx_bd_ring; struct qed_chain rx_comp_ring ____cacheline_aligned; + /* Used once per each NAPI run */ + u16 num_rx_buffers; + /* GRO */ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; @@ -385,9 +419,42 @@ struct qede_reload_args { } u; }; +/* Datapath functions definition */ +netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp); +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy); +int qede_free_tx_pkt(struct qede_dev *edev, + struct qede_tx_queue *txq, int *len); +int qede_poll(struct napi_struct *napi, int budget); +irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie); + +/* Filtering function definitions */ +void qede_force_mac(void *dev, u8 *mac, bool forced); +int qede_set_mac_addr(struct net_device *ndev, void *p); + +int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid); +int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid); +void qede_vlan_mark_nonconfigured(struct qede_dev *edev); +int qede_configure_vlan_filters(struct qede_dev *edev); + +int qede_set_features(struct net_device *dev, netdev_features_t features); +void qede_set_rx_mode(struct net_device *ndev); +void qede_config_rx_mode(struct net_device *ndev); +void qede_fill_rss_params(struct qede_dev *edev, + struct qed_update_vport_rss_params *rss, u8 *update); + +void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti); +void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti); + +int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp); + #ifdef CONFIG_DCB void qede_set_dcbnl_ops(struct net_device *ndev); #endif + void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_set_ethtool_ops(struct net_device *netdev); void qede_reload(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 
1c48f445c93b..897953133245 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ #include <linux/version.h> #include <linux/types.h> #include <linux/netdevice.h> @@ -14,7 +37,9 @@ #include <linux/string.h> #include <linux/pci.h> #include <linux/capability.h> +#include <linux/vmalloc.h> #include "qede.h" +#include "qede_ptp.h" #define QEDE_RQSTAT_OFFSET(stat_name) \ (offsetof(struct qede_rx_queue, stat_name)) @@ -908,8 +933,7 @@ static int qede_set_channels(struct net_device *dev, /* Reset the indirection table if rx queue count is updated */ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; - memset(&edev->rss_params.rss_ind_table, 0, - sizeof(edev->rss_params.rss_ind_table)); + memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table)); } qede_reload(edev, NULL, false); @@ -917,6 +941,14 @@ static int qede_set_channels(struct net_device *dev, return 0; } +static int qede_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct qede_dev *edev = netdev_priv(dev); + + return qede_ptp_get_ts_info(edev, info); +} + static int qede_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { @@ -955,11 +987,11 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: - if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP) + if (edev->rss_caps & QED_RSS_IPV4_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: - if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP) + if (edev->rss_caps & QED_RSS_IPV6_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case IPV4_FLOW: @@ -992,8 +1024,9 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; u8 set_caps = 0, clr_caps = 0; + int rc = 0; DP_VERBOSE(edev, QED_MSG_DEBUG, "Set rss flags command parameters: flow type = %d, data = %llu\n", @@ -1068,27 +1101,29 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) } /* No action is needed if there is no change in the rss capability */ - if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps & - ~clr_caps) | set_caps)) + if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps)) return 0; /* Update internal configuration */ - edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) | - set_caps; + edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps); edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; /* Re-configure if possible */ - if (netif_running(edev->ndev)) { - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.update_rss_flg = 1; - vport_update_params.vport_id = 0; - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - return edev->ops->vport_update(edev->cdev, - &vport_update_params); + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) { + __qede_unlock(edev); + return -ENOMEM; + } + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); + rc = edev->ops->vport_update(edev->cdev, vport_update_params); + vfree(vport_update_params); } + __qede_unlock(edev); - return 0; + return rc; } static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) @@ 
-1113,7 +1148,7 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); - return sizeof(edev->rss_params.rss_key); + return sizeof(edev->rss_key); } static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) @@ -1128,11 +1163,10 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) return 0; for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) - indir[i] = edev->rss_params.rss_ind_table[i]; + indir[i] = edev->rss_ind_table[i]; if (key) - memcpy(key, edev->rss_params.rss_key, - qede_get_rxfh_key_size(dev)); + memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev)); return 0; } @@ -1140,9 +1174,9 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) static int qede_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; struct qede_dev *edev = netdev_priv(dev); - int i; + int i, rc = 0; if (edev->dev_info.common.num_hwfns > 1) { DP_INFO(edev, @@ -1158,27 +1192,30 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir, if (indir) { for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) - edev->rss_params.rss_ind_table[i] = indir[i]; + edev->rss_ind_table[i] = indir[i]; edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; } if (key) { - memcpy(&edev->rss_params.rss_key, key, - qede_get_rxfh_key_size(dev)); + memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev)); edev->rss_params_inited |= QEDE_RSS_KEY_INITED; } - if (netif_running(edev->ndev)) { - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.update_rss_flg = 1; - vport_update_params.vport_id = 0; - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - return edev->ops->vport_update(edev->cdev, - &vport_update_params); + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) { + __qede_unlock(edev); + return -ENOMEM; + } + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); + rc = edev->ops->vport_update(edev->cdev, vport_update_params); + vfree(vport_update_params); } + __qede_unlock(edev); - return 0; + return rc; } /* This function enables the interrupt generation and the NAPI on the device */ @@ -1296,7 +1333,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; - int i, rc = 0; + int i, iter, rc = 0; u8 *data_ptr; for_each_queue(i) { @@ -1315,7 +1352,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) * enabled. This is because the queue 0 is configured as the default * queue and that the loopback traffic is not IP. 
*/ - for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { + for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) { if (!qede_has_rx_work(rxq)) { usleep_range(100, 200); continue; @@ -1362,7 +1399,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) qed_chain_recycle_consumed(&rxq->rx_comp_ring); } - if (i == QEDE_SELFTEST_POLL_COUNT) { + if (iter == QEDE_SELFTEST_POLL_COUNT) { DP_NOTICE(edev, "Failed to receive the traffic\n"); return -1; } @@ -1558,6 +1595,7 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_rxfh_key_size = qede_get_rxfh_key_size, .get_rxfh = qede_get_rxfh, .set_rxfh = qede_set_rxfh, + .get_ts_info = qede_get_ts_info, .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c new file mode 100644 index 000000000000..107c3fda4792 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -0,0 +1,759 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <net/udp_tunnel.h> +#include <linux/bitops.h> +#include <linux/vmalloc.h> + +#include <linux/qed/qed_if.h> +#include "qede.h" + +void qede_force_mac(void *dev, u8 *mac, bool forced) +{ + struct qede_dev *edev = dev; + + /* MAC hints take effect only if we haven't set one already */ + if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) + return; + + ether_addr_copy(edev->ndev->dev_addr, mac); + ether_addr_copy(edev->primary_mac, mac); +} + +void qede_fill_rss_params(struct qede_dev *edev, + struct qed_update_vport_rss_params *rss, u8 *update) +{ + bool need_reset = false; + int i; + + if (QEDE_RSS_COUNT(edev) <= 1) { + memset(rss, 0, sizeof(*rss)); + *update = 0; + return; + } + + /* Need to validate current RSS config uses valid entries */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) { + need_reset = true; + break; + } + } + + if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) { + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 indir_val, val; + + val = QEDE_RSS_COUNT(edev); + indir_val = ethtool_rxfh_indir_default(i, val); + edev->rss_ind_table[i] = indir_val; + } + edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; + } + + /* Now that we have the queue-indirection, prepare the handles */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]); + + rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle; + } + + if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { + netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key)); + edev->rss_params_inited |= QEDE_RSS_KEY_INITED; + } + memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key)); + + if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { + edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 | + QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; + edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; + } + rss->rss_caps = edev->rss_caps; + + *update = 1; +} + +static int qede_set_ucast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char mac[ETH_ALEN]) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.mac_valid = 1; + ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +static int qede_set_ucast_rx_vlan(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + u16 vid) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.vlan_valid = 1; + filter_cmd.filter.ucast.vlan = vid; + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) +{ + struct qed_update_vport_params *params; + int rc; + + /* Proceed only if action actually needs to be performed */ + if (edev->accept_any_vlan == action) + return 0; + + params = vzalloc(sizeof(*params)); + if (!params) + return -ENOMEM; + + params->vport_id = 0; + params->accept_any_vlan = action; + params->update_accept_any_vlan_flg = 1; + + rc = edev->ops->vport_update(edev->cdev, params); + if (rc) { + DP_ERR(edev, "Failed to %s accept-any-vlan\n", + action ? 
"enable" : "disable"); + } else { + DP_INFO(edev, "%s accept-any-vlan\n", + action ? "enabled" : "disabled"); + edev->accept_any_vlan = action; + } + + vfree(params); + return 0; +} + +int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan, *tmp; + int rc = 0; + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + DP_INFO(edev, "Failed to allocate struct for vlan\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&vlan->list); + vlan->vid = vid; + vlan->configured = false; + + /* Verify vlan isn't already configured */ + list_for_each_entry(tmp, &edev->vlan_list, list) { + if (tmp->vid == vlan->vid) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "vlan already configured\n"); + kfree(vlan); + return -EEXIST; + } + } + + /* If interface is down, cache this VLAN ID and return */ + __qede_lock(edev); + if (edev->state != QEDE_STATE_OPEN) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, VLAN %d will be configured when interface is up\n", + vid); + if (vid != 0) + edev->non_configured_vlans++; + list_add(&vlan->list, &edev->vlan_list); + goto out; + } + + /* Check for the filter limit. + * Note - vlan0 has a reserved filter and can be added without + * worrying about quota + */ + if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || + (vlan->vid == 0)) { + rc = qede_set_ucast_rx_vlan(edev, + QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %d\n", + vlan->vid); + kfree(vlan); + goto out; + } + vlan->configured = true; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) + edev->configured_vlans++; + } else { + /* Out of quota; Activate accept-any-VLAN mode */ + if (!edev->non_configured_vlans) { + rc = qede_config_accept_any_vlan(edev, true); + if (rc) { + kfree(vlan); + goto out; + } + } + + edev->non_configured_vlans++; + } + + list_add(&vlan->list, &edev->vlan_list); + +out: + __qede_unlock(edev); + return rc; +} + +static void qede_del_vlan_from_list(struct qede_dev *edev, + struct qede_vlan *vlan) +{ + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + if (vlan->configured) + edev->configured_vlans--; + else + edev->non_configured_vlans--; + } + + list_del(&vlan->list); + kfree(vlan); +} + +int qede_configure_vlan_filters(struct qede_dev *edev) +{ + int rc = 0, real_rc = 0, accept_any_vlan = 0; + struct qed_dev_eth_info *dev_info; + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return 0; + + dev_info = &edev->dev_info; + + /* Configure non-configured vlans */ + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (vlan->configured) + continue; + + /* We have used all our credits, now enable accept_any_vlan */ + if ((vlan->vid != 0) && + (edev->configured_vlans == dev_info->num_vlan_filters)) { + accept_any_vlan = 1; + continue; + } + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); + + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %u\n", + vlan->vid); + real_rc = rc; + continue; + } + + vlan->configured = true; + /* vlan0 filter doesn't consume our VLAN filter's quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans--; + edev->configured_vlans++; + } + } + + /* enable accept_any_vlan mode if we have more VLANs than credits, + * or remove accept_any_vlan mode if we've 
actually removed + * a non-configured vlan, and all remaining vlans are truly configured. + */ + + if (accept_any_vlan) + rc = qede_config_accept_any_vlan(edev, true); + else if (!edev->non_configured_vlans) + rc = qede_config_accept_any_vlan(edev, false); + + if (rc && !real_rc) + real_rc = rc; + + return real_rc; +} + +int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan = NULL; + int rc = 0; + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); + + /* Find whether entry exists */ + __qede_lock(edev); + list_for_each_entry(vlan, &edev->vlan_list, list) + if (vlan->vid == vid) + break; + + if (!vlan || (vlan->vid != vid)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Vlan isn't configured\n"); + goto out; + } + + if (edev->state != QEDE_STATE_OPEN) { + /* As interface is already down, we don't have a VPORT + * instance to remove vlan filter. So just update vlan list + */ + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, removing VLAN from list only\n"); + qede_del_vlan_from_list(edev, vlan); + goto out; + } + + /* Remove vlan */ + if (vlan->configured) { + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, + vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + goto out; + } + } + + qede_del_vlan_from_list(edev, vlan); + + /* We have removed a VLAN - try to see if we can + * configure non-configured VLAN from the list. + */ + rc = qede_configure_vlan_filters(edev); + +out: + __qede_unlock(edev); + return rc; +} + +void qede_vlan_mark_nonconfigured(struct qede_dev *edev) +{ + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return; + + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (!vlan->configured) + continue; + + vlan->configured = false; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans++; + edev->configured_vlans--; + } + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "marked vlan %d as non-configured\n", vlan->vid); + } + + edev->accept_any_vlan = false; +} + +static void qede_set_features_reload(struct qede_dev *edev, + struct qede_reload_args *args) +{ + edev->ndev->features = args->u.features; +} + +int qede_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qede_dev *edev = netdev_priv(dev); + netdev_features_t changes = features ^ dev->features; + bool need_reload = false; + + /* No action needed if hardware GRO is disabled during driver load */ + if (changes & NETIF_F_GRO) { + if (dev->features & NETIF_F_GRO) + need_reload = !edev->gro_disable; + else + need_reload = edev->gro_disable; + } + + if (need_reload) { + struct qede_reload_args args; + + args.u.features = features; + args.func = &qede_set_features_reload; + + /* Make sure that we definitely need to reload. + * In case of an eBPF attached program, there will be no FW + * aggregations, so no need to actually reload. 
+ */ + __qede_lock(edev); + if (edev->xdp_prog) + args.func(edev, &args); + else + qede_reload(edev, &args, true); + __qede_unlock(edev); + + return 1; + } + + return 0; +} + +void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", + t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (edev->geneve_dst_port) + return; + + edev->geneve_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + } + + schedule_delayed_work(&edev->sp_task, 0); +} + +void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (t_port != edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", + t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (t_port != edev->geneve_dst_port) + return; + + edev->geneve_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + } + + schedule_delayed_work(&edev->sp_task, 0); +} + +static void qede_xdp_reload_func(struct qede_dev *edev, + struct qede_reload_args *args) +{ + struct bpf_prog *old; + + old = xchg(&edev->xdp_prog, args->u.new_prog); + if (old) + bpf_prog_put(old); +} + +static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) +{ + struct qede_reload_args args; + + if (prog && prog->xdp_adjust_head) { + DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); + return -EOPNOTSUPP; + } + + /* If we're called, there was already a bpf reference increment */ + args.func = &qede_xdp_reload_func; + args.u.new_prog = prog; + qede_reload(edev, &args, false); + + return 0; +} + +int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return qede_xdp_set(edev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!edev->xdp_prog; + return 0; + default: + return -EINVAL; + } +} + +static int qede_set_mcast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char *mac, int num_macs) +{ + struct qed_filter_params filter_cmd; + int i; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_MCAST; + filter_cmd.filter.mcast.type = opcode; + filter_cmd.filter.mcast.num = num_macs; + + for (i = 0; i < num_macs; i++, mac += ETH_ALEN) + ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +int qede_set_mac_addr(struct net_device *ndev, void *p) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct sockaddr *addr = p; + int rc; + + ASSERT_RTNL(); /* @@@TBD To be removed */ + + DP_INFO(edev, "Set_mac_addr called\n"); + + if (!is_valid_ether_addr(addr->sa_data)) { + DP_NOTICE(edev, "The MAC address is not valid\n"); + return -EFAULT; + } + + if (!edev->ops->check_mac(edev->cdev, 
addr->sa_data)) {
+		DP_NOTICE(edev, "qed prevents setting MAC\n");
+		return -EINVAL;
+	}
+
+	ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+	if (!netif_running(ndev)) {
+		DP_NOTICE(edev, "The device is currently down\n");
+		return 0;
+	}
+
+	/* Remove the previous primary mac */
+	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+				   edev->primary_mac);
+	if (rc)
+		return rc;
+
+	edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+
+	/* Add MAC filter according to the new unicast HW MAC address */
+	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+				     edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+			       enum qed_filter_rx_mode_type *accept_flags)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+	unsigned char *mc_macs, *temp;
+	struct netdev_hw_addr *ha;
+	int rc = 0, mc_count;
+	size_t size;
+
+	size = 64 * ETH_ALEN;
+
+	mc_macs = kzalloc(size, GFP_KERNEL);
+	if (!mc_macs) {
+		DP_NOTICE(edev,
+			  "Failed to allocate memory for multicast MACs\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	temp = mc_macs;
+
+	/* Remove all previously configured MAC filters */
+	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+				   mc_macs, 1);
+	if (rc)
+		goto exit;
+
+	netif_addr_lock_bh(ndev);
+
+	mc_count = netdev_mc_count(ndev);
+	if (mc_count < 64) {
+		netdev_for_each_mc_addr(ha, ndev) {
+			ether_addr_copy(temp, ha->addr);
+			temp += ETH_ALEN;
+		}
+	}
+
+	netif_addr_unlock_bh(ndev);
+
+	/* Check for all multicast @@@TBD resource allocation */
+	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
+		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+	} else {
+		/* Add all multicast MAC filters */
+		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+					   mc_macs, mc_count);
+	}
+
+exit:
+	kfree(mc_macs);
+	return rc;
+}
+
+void qede_set_rx_mode(struct net_device *ndev)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+
+/* Must be called with qede_lock held */
+void qede_config_rx_mode(struct net_device *ndev)
+{
+	enum qed_filter_rx_mode_type accept_flags;
+	struct qede_dev *edev = netdev_priv(ndev);
+	struct qed_filter_params rx_mode;
+	unsigned char *uc_macs, *temp;
+	struct netdev_hw_addr *ha;
+	int rc, uc_count;
+	size_t size;
+
+	netif_addr_lock_bh(ndev);
+
+	uc_count = netdev_uc_count(ndev);
+	size = uc_count * ETH_ALEN;
+
+	uc_macs = kzalloc(size, GFP_ATOMIC);
+	if (!uc_macs) {
+		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+		netif_addr_unlock_bh(ndev);
+		return;
+	}
+
+	temp = uc_macs;
+	netdev_for_each_uc_addr(ha, ndev) {
+		ether_addr_copy(temp, ha->addr);
+		temp += ETH_ALEN;
+	}
+
+	netif_addr_unlock_bh(ndev);
+
+	/* Configure the struct for the Rx mode */
+	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+	/* Remove all previous unicast secondary macs and multicast macs
+	 * (configure / leave the primary mac)
+	 */
+	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+				   edev->primary_mac);
+	if (rc)
+		goto out;
+
+	/* Check for promiscuous */
+	if (ndev->flags & IFF_PROMISC)
+		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+	else
+		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+
+	/* Configure all filters regardless, in case promisc is rejected */
+	if (uc_count < edev->dev_info.num_mac_filters) {
+		int i;
+
+		temp = uc_macs;
+ for (i = 0; i < uc_count; i++) { + rc = qede_set_ucast_rx_mac(edev, + QED_FILTER_XCAST_TYPE_ADD, + temp); + if (rc) + goto out; + + temp += ETH_ALEN; + } + } else { + accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; + } + + rc = qede_configure_mcast_filtering(ndev, &accept_flags); + if (rc) + goto out; + + /* take care of VLAN mode */ + if (ndev->flags & IFF_PROMISC) { + qede_config_accept_any_vlan(edev, true); + } else if (!edev->non_configured_vlans) { + /* It's possible that accept_any_vlan mode is set due to a + * previous setting of IFF_PROMISC. If vlan credits are + * sufficient, disable accept_any_vlan. + */ + qede_config_accept_any_vlan(edev, false); + } + + rx_mode.filter.accept_flags = accept_flags; + edev->ops->filter_config(edev->cdev, &rx_mode); +out: + kfree(uc_macs); +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c new file mode 100644 index 000000000000..1e65038c8fc0 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -0,0 +1,1700 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/bpf_trace.h> +#include <net/udp_tunnel.h> +#include <linux/ip.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <net/ip6_checksum.h> +#include "qede_ptp.h" + +#include <linux/qed/qed_if.h> +#include "qede.h" +/********************************* + * Content also used by slowpath * + *********************************/ + +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) +{ + struct sw_rx_data *sw_rx_data; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + struct page *data; + + /* In case lazy-allocation is allowed, postpone allocation until the + * end of the NAPI run. We'd still need to make sure the Rx ring has + * sufficient buffers to guarantee an additional Rx interrupt. 
+ */ + if (allow_lazy && likely(rxq->filled_buffers > 12)) { + rxq->filled_buffers--; + return 0; + } + + data = alloc_pages(GFP_ATOMIC, 0); + if (unlikely(!data)) + return -ENOMEM; + + /* Map the entire page as it would be used + * for multiple RX buffer segment size mapping. + */ + mapping = dma_map_page(rxq->dev, data, 0, + PAGE_SIZE, rxq->data_direction); + if (unlikely(dma_mapping_error(rxq->dev, mapping))) { + __free_page(data); + return -ENOMEM; + } + + sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->page_offset = 0; + sw_rx_data->data = data; + sw_rx_data->mapping = mapping; + + /* Advance PROD and get BD pointer */ + rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); + WARN_ON(!rx_bd); + rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + + rxq->sw_rx_prod++; + rxq->filled_buffers++; + + return 0; +} + +/* Unmap the data and free skb */ +int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) +{ + u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_bd *tx_data_bd; + int bds_consumed = 0; + int nbds; + bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; + int i, split_bd_len = 0; + + if (unlikely(!skb)) { + DP_ERR(edev, + "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", + idx, txq->sw_tx_cons, txq->sw_tx_prod); + return -1; + } + + *len = skb->len; + + first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + bds_consumed++; + + nbds = first_bd->data.nbds; + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + bds_consumed++; + } + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + while (bds_consumed++ < nbds) + qed_chain_consume(&txq->tx_pbl); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; + + return 0; +} + +/* Unmap the data and free skb when mapping failed during start_xmit */ +static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, + struct eth_tx_1st_bd *first_bd, + int nbd, bool data_split) +{ + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; + struct eth_tx_bd *tx_data_bd; + int i, split_bd_len = 0; + + /* Return prod to its position before this skb was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + nbd--; + } + + dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < nbd; i++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + if (tx_data_bd->nbytes) + dma_unmap_page(txq->dev, + BD_UNMAP_ADDR(tx_data_bd), + 
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + /* Return again prod to its position before this skb was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; +} + +static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) +{ + u32 rc = XMIT_L4_CSUM; + __be16 l3_proto; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return XMIT_PLAIN; + + l3_proto = vlan_get_protocol(skb); + if (l3_proto == htons(ETH_P_IPV6) && + (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + *ipv6_ext = 1; + + if (skb->encapsulation) { + rc |= XMIT_ENC; + if (skb_is_gso(skb)) { + unsigned short gso_type = skb_shinfo(skb)->gso_type; + + if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || + (gso_type & SKB_GSO_GRE_CSUM)) + rc |= XMIT_ENC_GSO_L4_CSUM; + + rc |= XMIT_LSO; + return rc; + } + } + + if (skb_is_gso(skb)) + rc |= XMIT_LSO; + + return rc; +} + +static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, + struct eth_tx_2nd_bd *second_bd, + struct eth_tx_3rd_bd *third_bd) +{ + u8 l4_proto; + u16 bd2_bits1 = 0, bd2_bits2 = 0; + + bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); + + bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) + << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; + + bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); + + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) + l4_proto = ipv6_hdr(skb)->nexthdr; + else + l4_proto = ip_hdr(skb)->protocol; + + if (l4_proto == IPPROTO_UDP) + bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + + if (third_bd) + third_bd->data.bitfields |= + cpu_to_le16(((tcp_hdrlen(skb) / 4) & + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); + + second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); + second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); +} + +static int map_frag_to_bd(struct qede_tx_queue *txq, + skb_frag_t *frag, struct eth_tx_bd *bd) +{ + dma_addr_t mapping; + + /* Map skb non-linear frag data for DMA */ + mapping = skb_frag_dma_map(txq->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(txq->dev, mapping))) + return -ENOMEM; + + /* Setup the data pointer of the frag data */ + BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); + + return 0; +} + +static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) +{ + if (is_encap_pkt) + return (skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data); + else + return (skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data); +} + +/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) +static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) +{ + int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; + + if (xmit_type & XMIT_LSO) { + int hlen; + + hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); + + /* linear payload would require its own BD */ + if (skb_headlen(skb) > hlen) + allowed_frags--; + } + + return (skb_shinfo(skb)->nr_frags > allowed_frags); +} +#endif + +static inline void qede_update_tx_producer(struct qede_tx_queue *txq) +{ + /* wmb makes sure that the BDs data is updated before updating the + * producer, otherwise FW may read old data from the BDs. 
+ */ + wmb(); + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the queue lock is released and another start_xmit is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + +static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, + struct sw_rx_data *metadata, u16 padding, u16 length) +{ + struct qede_tx_queue *txq = fp->xdp_tx; + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct eth_tx_1st_bd *first_bd; + + if (!qed_chain_get_elem_left(&txq->tx_pbl)) { + txq->stopped_cnt++; + return -ENOMEM; + } + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); + first_bd->data.bitfields |= + (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + first_bd->data.nbds = 1; + + /* We can safely ignore the offset, as it's 0 for XDP */ + BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); + + /* Synchronize the buffer back to device, as program [probably] + * has changed it. + */ + dma_sync_single_for_device(&edev->pdev->dev, + metadata->mapping + padding, + length, PCI_DMA_TODEVICE); + + txq->sw_tx_ring.pages[idx] = metadata->data; + txq->sw_tx_prod++; + + /* Mark the fastpath for future XDP doorbell */ + fp->xdp_xmit = 1; + + return 0; +} + +int qede_txq_has_work(struct qede_tx_queue *txq) +{ + u16 hw_bd_cons; + + /* Tell compiler that consumer and producer can change */ + barrier(); + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) + return 0; + + return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); +} + +static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct eth_tx_1st_bd *bd; + u16 hw_bd_cons; + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & + NUM_TX_BDS_MAX]); + + txq->sw_tx_cons++; + txq->xmit_pkts++; + } +} + +static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct netdev_queue *netdev_txq; + u16 hw_bd_cons; + unsigned int pkts_compl = 0, bytes_compl = 0; + int rc; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + int len = 0; + + rc = qede_free_tx_pkt(edev, txq, &len); + if (rc) { + DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", + hw_bd_cons, + qed_chain_get_cons_idx(&txq->tx_pbl)); + break; + } + + bytes_compl += len; + pkts_compl++; + txq->sw_tx_cons++; + txq->xmit_pkts++; + } + + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. 
+ * On the other hand we need an rmb() here to ensure the proper + * ordering of bit testing in the following + * netif_tx_queue_stopped(txq) call. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(netdev_txq))) { + /* Taking tx_lock is needed to prevent reenabling the queue + * while it's empty. This could have happen if rx_action() gets + * suspended in qede_tx_int() after the condition before + * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): + * + * stops the queue->sees fresh tx_bd_cons->releases the queue-> + * sends some packets consuming the whole queue again-> + * stops the queue + */ + + __netif_tx_lock(netdev_txq, smp_processor_id()); + + if ((netif_tx_queue_stopped(netdev_txq)) && + (edev->state == QEDE_STATE_OPEN) && + (qed_chain_get_elem_left(&txq->tx_pbl) + >= (MAX_SKB_FRAGS + 1))) { + netif_tx_wake_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_DONE, + "Wake queue was called\n"); + } + + __netif_tx_unlock(netdev_txq); + } + + return 0; +} + +bool qede_has_rx_work(struct qede_rx_queue *rxq) +{ + u16 hw_comp_cons, sw_comp_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + return hw_comp_cons != sw_comp_cons; +} + +static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) +{ + qed_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; +} + +/* This function reuses the buffer(from an offset) from + * consumer index to producer index in the bd ring + */ +static inline void qede_reuse_page(struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) +{ + struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); + struct sw_rx_data *curr_prod; + dma_addr_t new_mapping; + + curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + *curr_prod = *curr_cons; + + new_mapping = curr_prod->mapping + curr_prod->page_offset; + + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); + + rxq->sw_rx_prod++; + curr_cons->data = NULL; +} + +/* In case of allocation failures reuse buffers + * from consumer index to produce buffers for firmware + */ +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) +{ + struct sw_rx_data *curr_cons; + + for (; count > 0; count--) { + curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; + qede_reuse_page(rxq, curr_cons); + qede_rx_bd_ring_consume(rxq); + } +} + +static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) +{ + /* Move to the next segment in the page */ + curr_cons->page_offset += rxq->rx_buf_seg_size; + + if (curr_cons->page_offset == PAGE_SIZE) { + if (unlikely(qede_alloc_rx_buffer(rxq, true))) { + /* Since we failed to allocate new buffer + * current buffer can be used again. + */ + curr_cons->page_offset -= rxq->rx_buf_seg_size; + + return -ENOMEM; + } + + dma_unmap_page(rxq->dev, curr_cons->mapping, + PAGE_SIZE, rxq->data_direction); + } else { + /* Increment refcount of the page as we don't want + * network stack to take the ownership of the page + * which can be recycled multiple times by the driver. 
+ */ + page_ref_inc(curr_cons->data); + qede_reuse_page(rxq, curr_cons); + } + + return 0; +} + +void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) +{ + u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); + u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); + struct eth_rx_prod_data rx_prods = {0}; + + /* Update producers */ + rx_prods.bd_prod = cpu_to_le16(bd_prod); + rx_prods.cqe_prod = cpu_to_le16(cqe_prod); + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + */ + wmb(); + + internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), + (u32 *)&rx_prods); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the napi lock is released and another qede_poll is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + +static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) +{ + enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; + enum rss_hash_type htype; + u32 hash = 0; + + htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); + if (htype) { + hash_type = ((htype == RSS_HASH_TYPE_IPV4) || + (htype == RSS_HASH_TYPE_IPV6)) ? + PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; + hash = le32_to_cpu(rss_hash); + } + skb_set_hash(skb, hash, hash_type); +} + +static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) +{ + skb_checksum_none_assert(skb); + + if (csum_flag & QEDE_CSUM_UNNECESSARY) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) { + skb->csum_level = 1; + skb->encapsulation = 1; + } +} + +static inline void qede_skb_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + struct sk_buff *skb, u16 vlan_tag) +{ + if (vlan_tag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&fp->napi, skb); + rxq->rcv_pkts++; +} + +static void qede_set_gro_params(struct qede_dev *edev, + struct sk_buff *skb, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); + + if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & + PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - + cqe->header_len; +} + +static int qede_fill_frag_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + u8 tpa_agg_index, u16 len_on_bd) +{ + struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & + NUM_RX_BDS_MAX]; + struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; + struct sk_buff *skb = tpa_info->skb; + + if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) + goto out; + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, tpa_info->frag_id++, + current_bd->data, current_bd->page_offset, + len_on_bd); + + if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { + /* Incr page ref count to reuse on allocation failure + * so that it doesn't get freed while freeing SKB. 
+	 */
+	page_ref_inc(current_bd->data);
+	goto out;
+	}
+
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+
+	skb->data_len += len_on_bd;
+	skb->truesize += rxq->rx_buf_seg_size;
+	skb->len += len_on_bd;
+
+	return 0;
+
+out:
+	tpa_info->state = QEDE_AGG_STATE_ERROR;
+	qede_recycle_rx_bd_ring(rxq, 1);
+
+	return -ENOMEM;
+}
+
+static bool qede_tunn_exist(u16 flag)
+{
+	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+	u16 csum_flag = 0;
+	u8 tcsum = 0;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+			   struct qede_rx_queue *rxq,
+			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+	struct sw_rx_data *replace_buf = &tpa_info->buffer;
+	dma_addr_t mapping = tpa_info->buffer_mapping;
+	struct sw_rx_data *sw_rx_data_cons;
+	struct sw_rx_data *sw_rx_data_prod;
+
+	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+	/* Use pre-allocated replacement buffer - we can't release the agg.
+	 * start until it's over and we don't want to risk allocation failing
+	 * here, so re-allocate when aggregation will be over.
+	 */
+	sw_rx_data_prod->mapping = replace_buf->mapping;
+
+	sw_rx_data_prod->data = replace_buf->data;
+	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+	sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+	rxq->sw_rx_prod++;
+
+	/* move partial skb from cons to pool (don't unmap yet)
+	 * save mapping, in case we drop the packet later on.
+ */ + tpa_info->buffer = *sw_rx_data_cons; + mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), + le32_to_cpu(rx_bd_cons->addr.lo)); + + tpa_info->buffer_mapping = mapping; + rxq->sw_rx_cons++; + + /* set tpa state to start only if we are able to allocate skb + * for this aggregation, otherwise mark as error and aggregation will + * be dropped + */ + tpa_info->skb = netdev_alloc_skb(edev->ndev, + le16_to_cpu(cqe->len_on_first_bd)); + if (unlikely(!tpa_info->skb)) { + DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); + tpa_info->state = QEDE_AGG_STATE_ERROR; + goto cons_buf; + } + + /* Start filling in the aggregation info */ + skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); + tpa_info->frag_id = 0; + tpa_info->state = QEDE_AGG_STATE_START; + + /* Store some information from first CQE */ + tpa_info->start_cqe_placement_offset = cqe->placement_offset; + tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); + if ((le16_to_cpu(cqe->pars_flags.flags) >> + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + else + tpa_info->vlan_tag = 0; + + qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); + + /* This is needed in order to enable forwarding support */ + qede_set_gro_params(edev, tpa_info->skb, cqe); + +cons_buf: /* We still need to handle bd_len_list to consume buffers */ + if (likely(cqe->ext_bd_len_list[0])) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->ext_bd_len_list[0])); + + if (unlikely(cqe->ext_bd_len_list[1])) { + DP_ERR(edev, + "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); + tpa_info->state = QEDE_AGG_STATE_ERROR; + } +} + +#ifdef CONFIG_INET +static void qede_gro_ip_csum(struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); + + tcp_gro_complete(skb); +} + +static void qede_gro_ipv6_csum(struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); + tcp_gro_complete(skb); +} +#endif + +static void qede_gro_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct sk_buff *skb, + u16 vlan_tag) +{ + /* FW can send a single MTU sized packet from gro flow + * due to aggregation timeout/last segment etc. which + * is not expected to be a gro packet. If a skb has zero + * frags then simply push it in the stack as non gso skb. 
+ */ + if (unlikely(!skb->data_len)) { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + goto send_skb; + } + +#ifdef CONFIG_INET + if (skb_shinfo(skb)->gso_size) { + skb_reset_network_header(skb); + + switch (skb->protocol) { + case htons(ETH_P_IP): + qede_gro_ip_csum(skb); + break; + case htons(ETH_P_IPV6): + qede_gro_ipv6_csum(skb); + break; + default: + DP_ERR(edev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + ntohs(skb->protocol)); + } + } +#endif + +send_skb: + skb_record_rx_queue(skb, fp->rxq->rxq_id); + qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); +} + +static inline void qede_tpa_cont(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_cont_cqe *cqe) +{ + int i; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA cont with more than a single len_list entry\n"); +} + +static void qede_tpa_end(struct qede_dev *edev, + struct qede_fastpath *fp, + struct eth_fast_path_rx_tpa_end_cqe *cqe) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_agg_info *tpa_info; + struct sk_buff *skb; + int i; + + tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; + skb = tpa_info->skb; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA emd with more than a single len_list entry\n"); + + if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) + goto err; + + /* Sanity */ + if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) + DP_ERR(edev, + "Strange - TPA had %02x BDs, but SKB has only %d frags\n", + cqe->num_of_bds, tpa_info->frag_id); + if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) + DP_ERR(edev, + "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", + le16_to_cpu(cqe->total_packet_len), skb->len); + + memcpy(skb->data, + page_address(tpa_info->buffer.data) + + tpa_info->start_cqe_placement_offset + + tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); + + /* Finalize the SKB */ + skb->protocol = eth_type_trans(skb, edev->ndev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); + + qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); + + tpa_info->state = QEDE_AGG_STATE_NONE; + + return; +err: + tpa_info->state = QEDE_AGG_STATE_NONE; + dev_kfree_skb_any(tpa_info->skb); + tpa_info->skb = NULL; +} + +static u8 qede_check_notunn_csum(u16 flag) +{ + u16 csum_flag = 0; + u8 csum = 0; + + if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + csum = QEDE_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return csum; +} + +static u8 qede_check_csum(u16 flag) +{ + if (!qede_tunn_exist(flag)) + return qede_check_notunn_csum(flag); + else + return qede_check_tunn_csum(flag); +} + +static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, + u16 flag) +{ + u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; + + if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK << + 
ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+		return true;
+
+	return false;
+}
+
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+			struct qede_fastpath *fp,
+			struct qede_rx_queue *rxq,
+			struct bpf_prog *prog,
+			struct sw_rx_data *bd,
+			struct eth_fast_path_rx_reg_cqe *cqe)
+{
+	u16 len = le16_to_cpu(cqe->len_on_first_bd);
+	struct xdp_buff xdp;
+	enum xdp_action act;
+
+	xdp.data = page_address(bd->data) + cqe->placement_offset;
+	xdp.data_end = xdp.data + len;
+
+	/* Queues always have a full reset currently, so for the time
+	 * being until there's atomic program replace just mark read
+	 * side for map helpers.
+	 */
+	rcu_read_lock();
+	act = bpf_prog_run_xdp(prog, &xdp);
+	rcu_read_unlock();
+
+	if (act == XDP_PASS)
+		return true;
+
+	/* Count number of packets not to be passed to stack */
+	rxq->xdp_no_pass++;
+
+	switch (act) {
+	case XDP_TX:
+		/* We need the replacement buffer before transmit. */
+		if (qede_alloc_rx_buffer(rxq, true)) {
+			qede_recycle_rx_bd_ring(rxq, 1);
+			trace_xdp_exception(edev->ndev, prog, act);
+			return false;
+		}
+
+		/* Now if there's a transmission problem, we'd still have to
+		 * throw current buffer, as replacement was already allocated.
+		 */
+		if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+			dma_unmap_page(rxq->dev, bd->mapping,
+				       PAGE_SIZE, DMA_BIDIRECTIONAL);
+			__free_page(bd->data);
+			trace_xdp_exception(edev->ndev, prog, act);
+		}
+
+		/* Regardless, we've consumed an Rx BD */
+		qede_rx_bd_ring_consume(rxq);
+		return false;
+
+	default:
+		bpf_warn_invalid_xdp_action(act);
+	case XDP_ABORTED:
+		trace_xdp_exception(edev->ndev, prog, act);
+	case XDP_DROP:
+		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+	}
+
+	return false;
+}
+
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+					    struct qede_rx_queue *rxq,
+					    struct sw_rx_data *bd, u16 len,
+					    u16 pad)
+{
+	unsigned int offset = bd->page_offset;
+	struct skb_frag_struct *frag;
+	struct page *page = bd->data;
+	unsigned int pull_len;
+	struct sk_buff *skb;
+	unsigned char *va;
+
+	/* Allocate a new SKB with a sufficiently large header len */
+	skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* Copy data into SKB - if it's small, we can simply copy it and
+	 * re-use the already allocated & mapped memory.
+	 */
+	if (len + pad <= edev->rx_copybreak) {
+		memcpy(skb_put(skb, len),
+		       page_address(page) + pad + offset, len);
+		qede_reuse_page(rxq, bd);
+		goto out;
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+			page, pad + offset, len, rxq->rx_buf_seg_size);
+
+	va = skb_frag_address(frag);
+	pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+	/* Align the pull_len to optimize memcpy */
+	memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+	/* Correct the skb & frag sizes offset after the pull */
+	skb_frag_size_sub(frag, pull_len);
+	frag->page_offset += pull_len;
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+
+	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+		/* Incr page ref count to reuse on allocation failure so
+		 * that it doesn't get freed while freeing SKB [as it's
+		 * already mapped there].
+ */ + page_ref_inc(page); + dev_kfree_skb_any(skb); + return NULL; + } + +out: + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + return skb; +} + +static int qede_rx_build_jumbo(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sk_buff *skb, + struct eth_fast_path_rx_reg_cqe *cqe, + u16 first_bd_len) +{ + u16 pkt_len = le16_to_cpu(cqe->pkt_len); + struct sw_rx_data *bd; + u16 bd_cons_idx; + u8 num_frags; + + pkt_len -= first_bd_len; + + /* We've already used one BD for the SKB. Now take care of the rest */ + for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { + u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : + pkt_len; + + if (unlikely(!cur_size)) { + DP_ERR(edev, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + goto out; + } + + /* We need a replacement buffer for each BD */ + if (unlikely(qede_alloc_rx_buffer(rxq, true))) + goto out; + + /* Now that we've allocated the replacement buffer, + * we can safely consume the next BD and map it to the SKB. + */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + qede_rx_bd_ring_consume(rxq); + + dma_unmap_page(rxq->dev, bd->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, + bd->data, 0, cur_size); + + skb->truesize += PAGE_SIZE; + skb->data_len += cur_size; + skb->len += cur_size; + pkt_len -= cur_size; + } + + if (unlikely(pkt_len)) + DP_ERR(edev, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + +out: + return num_frags; +} + +static int qede_rx_process_tpa_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + union eth_rx_cqe *cqe, + enum eth_rx_cqe_type type) +{ + switch (type) { + case ETH_RX_CQE_TYPE_TPA_START: + qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); + return 0; + case ETH_RX_CQE_TYPE_TPA_CONT: + qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); + return 0; + case ETH_RX_CQE_TYPE_TPA_END: + qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); + return 1; + default: + return 0; + } +} + +static int qede_rx_process_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); + struct eth_fast_path_rx_reg_cqe *fp_cqe; + u16 len, pad, bd_cons_idx, parse_flag; + enum eth_rx_cqe_type cqe_type; + union eth_rx_cqe *cqe; + struct sw_rx_data *bd; + struct sk_buff *skb; + __le16 flags; + u8 csum_flag; + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + + /* Process an unlikely slowpath event */ + if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { + struct eth_slow_path_rx_cqe *sp_cqe; + + sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; + edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); + return 0; + } + + /* Handle TPA cqes */ + if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) + return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); + + /* Get the data from the SW ring; Consume it only after it's evident + * we wouldn't recycle it. 
+ */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + pad = fp_cqe->placement_offset; + + /* Run eBPF program if one is attached */ + if (xdp_prog) + if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) + return 1; + + /* If this is an error packet then drop it */ + flags = cqe->fast_path_regular.pars_flags.flags; + parse_flag = le16_to_cpu(flags); + + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { + rxq->rx_ip_frags++; + } else { + DP_NOTICE(edev, + "CQE has error, flags = %x, dropping incoming packet\n", + parse_flag); + rxq->rx_hw_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; + } + } + + /* Basic validation passed; Need to prepare an SKB. This would also + * guarantee to finally consume the first BD upon success. + */ + skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); + if (!skb) { + rxq->rx_alloc_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; + } + + /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed + * by a single cqe. + */ + if (fp_cqe->bd_num > 1) { + u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, + fp_cqe, len); + + if (unlikely(unmapped_frags > 0)) { + qede_recycle_rx_bd_ring(rxq, unmapped_frags); + dev_kfree_skb_any(skb); + return 0; + } + } + + /* The SKB contains all the data. Now prepare meta-magic */ + skb->protocol = eth_type_trans(skb, edev->ndev); + qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); + qede_set_skb_csum(skb, csum_flag); + skb_record_rx_queue(skb, rxq->rxq_id); + qede_ptp_record_rx_ts(edev, cqe, skb); + + /* SKB is prepared - pass it to stack */ + qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); + + return 1; +} + +static int qede_rx_int(struct qede_fastpath *fp, int budget) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_dev *edev = fp->edev; + u16 hw_comp_cons, sw_comp_cons; + int work_done = 0; + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative reads of CQE + * / BD in the while-loop before reading hw_comp_cons. If the CQE is + * read before it is written by FW, then FW writes CQE and SB, and then + * the CPU reads the hw_comp_cons, it will use an old CQE. + */ + rmb(); + + /* Loop to complete all indicated BDs */ + while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { + qede_rx_process_cqe(edev, fp, rxq); + qed_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + work_done++; + } + + /* Allocate replacement buffers */ + while (rxq->num_rx_buffers - rxq->filled_buffers) + if (qede_alloc_rx_buffer(rxq, false)) + break; + + /* Update producers */ + qede_update_rx_prod(edev, rxq); + + return work_done; +} + +static bool qede_poll_is_more_work(struct qede_fastpath *fp) +{ + qed_sb_update_sb_idx(fp->sb_info); + + /* *_has_*_work() reads the status block, thus we need to ensure that + * status block indices have been actually read (qed_sb_update_sb_idx) + * prior to this check (*_has_*_work) so that we won't write the + * "newer" value of the status block to HW (if there was a DMA right + * after qede_has_rx_work and if there is no rmb, the memory reading + * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). 
+ * In this case there will never be another interrupt until there is + * another update of the status block, while there is still unhandled + * work. + */ + rmb(); + + if (likely(fp->type & QEDE_FASTPATH_RX)) + if (qede_has_rx_work(fp->rxq)) + return true; + + if (fp->type & QEDE_FASTPATH_XDP) + if (qede_txq_has_work(fp->xdp_tx)) + return true; + + if (likely(fp->type & QEDE_FASTPATH_TX)) + if (qede_txq_has_work(fp->txq)) + return true; + + return false; +} + +/********************* + * NDO & API related * + *********************/ +int qede_poll(struct napi_struct *napi, int budget) +{ + struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, + napi); + struct qede_dev *edev = fp->edev; + int rx_work_done = 0; + + if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) + qede_tx_int(edev, fp->txq); + + if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) + qede_xdp_tx_int(edev, fp->xdp_tx); + + rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) ? + qede_rx_int(fp, budget) : 0; + if (rx_work_done < budget) { + if (!qede_poll_is_more_work(fp)) { + napi_complete_done(napi, rx_work_done); + + /* Update and reenable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + } else { + rx_work_done = budget; + } + } + + if (fp->xdp_xmit) { + u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); + + fp->xdp_xmit = 0; + fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); + qede_update_tx_producer(fp->xdp_tx); + } + + return rx_work_done; +} + +irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) +{ + struct qede_fastpath *fp = fp_cookie; + + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); + + napi_schedule_irqoff(&fp->napi); + return IRQ_HANDLED; +} + +/* Main transmit function */ +netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct netdev_queue *netdev_txq; + struct qede_tx_queue *txq; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_2nd_bd *second_bd = NULL; + struct eth_tx_3rd_bd *third_bd = NULL; + struct eth_tx_bd *tx_data_bd = NULL; + u16 txq_index; + u8 nbd = 0; + dma_addr_t mapping; + int rc, frag_idx = 0, ipv6_ext = 0; + u8 xmit_type; + u16 idx; + u16 hlen; + bool data_split = false; + + /* Get tx-queue context and netdev index */ + txq_index = skb_get_queue_mapping(skb); + WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); + txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; + netdev_txq = netdev_get_tx_queue(ndev, txq_index); + + WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); + + xmit_type = qede_xmit_type(skb, &ipv6_ext); + +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) + if (qede_pkt_req_lin(skb, xmit_type)) { + if (skb_linearize(skb)) { + DP_NOTICE(edev, + "SKB linearization failed - silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + txq->sw_tx_ring.skbs[idx].skb = skb; + first_bd = (struct eth_tx_1st_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + qede_ptp_tx_ts(edev, skb); + + /* Map skb linear data for DMA and set in the first BD */ + mapping = dma_map_single(txq->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if 
(unlikely(dma_mapping_error(txq->dev, mapping))) { + DP_NOTICE(edev, "SKB mapping failed\n"); + qede_free_failed_tx_pkt(txq, first_bd, 0, false); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + nbd++; + BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); + + /* In case there is IPv6 with extension headers or LSO we need 2nd and + * 3rd BDs. + */ + if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { + second_bd = (struct eth_tx_2nd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(second_bd, 0, sizeof(*second_bd)); + + nbd++; + third_bd = (struct eth_tx_3rd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(third_bd, 0, sizeof(*third_bd)); + + nbd++; + /* We need to fill in additional data in second_bd... */ + tx_data_bd = (struct eth_tx_bd *)second_bd; + } + + if (skb_vlan_tag_present(skb)) { + first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; + } + + /* Fill the parsing flags & params according to the requested offload */ + if (xmit_type & XMIT_L4_CSUM) { + /* We don't re-calculate IP checksum as it is already done by + * the upper stack + */ + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + first_bd->data.bitfields |= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + } + + /* Legacy FW had flipped behavior in regard to this bit - + * I.e., needed to set to prevent FW from touching encapsulated + * packets when it didn't need to. + */ + if (unlikely(txq->is_legacy)) + first_bd->data.bitfields ^= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + + /* If the packet is IPv6 with extension header, indicate that + * to FW and pass few params, since the device cracker doesn't + * support parsing IPv6 with extension header/s. + */ + if (unlikely(ipv6_ext)) + qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); + } + + if (xmit_type & XMIT_LSO) { + first_bd->data.bd_flags.bitfields |= + (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); + third_bd->data.lso_mss = + cpu_to_le16(skb_shinfo(skb)->gso_size); + + if (unlikely(xmit_type & XMIT_ENC)) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { + u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; + + first_bd->data.bd_flags.bitfields |= 1 << tmp; + } + hlen = qede_get_skb_hlen(skb, true); + } else { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + hlen = qede_get_skb_hlen(skb, false); + } + + /* @@@TBD - if will not be removed need to check */ + third_bd->data.bitfields |= + cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + + /* Make life easier for FW guys who can't deal with header and + * data on same BD. If we need to split, use the second bd... 
+ */ + if (unlikely(skb_headlen(skb) > hlen)) { + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "TSO split header size is %d (%x:%x)\n", + first_bd->nbytes, first_bd->addr.hi, + first_bd->addr.lo); + + mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), + le32_to_cpu(first_bd->addr.lo)) + + hlen; + + BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, + le16_to_cpu(first_bd->nbytes) - + hlen); + + /* this marks the BD as one that has no + * individual mapping + */ + txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; + + first_bd->nbytes = cpu_to_le16(hlen); + + tx_data_bd = (struct eth_tx_bd *)third_bd; + data_split = true; + } + } else { + first_bd->data.bitfields |= + (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + } + + /* Handle fragmented skb */ + /* special handle for frags inside 2nd and 3rd bds.. */ + while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { + rc = map_frag_to_bd(txq, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + + if (tx_data_bd == (struct eth_tx_bd *)second_bd) + tx_data_bd = (struct eth_tx_bd *)third_bd; + else + tx_data_bd = NULL; + + frag_idx++; + } + + /* map last frags into 4th, 5th .... */ + for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + + memset(tx_data_bd, 0, sizeof(*tx_data_bd)); + + rc = map_frag_to_bd(txq, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + } + + /* update the first BD with the actual num BDs */ + first_bd->data.nbds = nbd; + + netdev_tx_sent_queue(netdev_txq, skb->len); + + skb_tx_timestamp(skb); + + /* Advance packet producer only before sending the packet since mapping + * of pages may fail. + */ + txq->sw_tx_prod++; + + /* 'next page' entries are counted in the producer value */ + txq->tx_db.data.bd_prod = + cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); + + if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) + qede_update_tx_producer(txq); + + if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) + < (MAX_SKB_FRAGS + 1))) { + if (skb->xmit_more) + qede_update_tx_producer(txq); + + netif_tx_stop_queue(netdev_txq); + txq->stopped_cnt++; + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Stop queue was called\n"); + /* paired memory barrier is in qede_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons + */ + smp_mb(); + + if ((qed_chain_get_elem_left(&txq->tx_pbl) >= + (MAX_SKB_FRAGS + 1)) && + (edev->state == QEDE_STATE_OPEN)) { + netif_tx_wake_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Wake queue was called\n"); + } + } + + return NETDEV_TX_OK; +} + +/* 8B udp header + 8B base tunnel header + 32B option length */ +#define QEDE_MAX_TUN_HDR_LEN 48 + +netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation) { + u8 l4_proto = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + /* Disable offloads for geneve tunnels, as HW can't parse + * the geneve header which has option length greater than 32B. 
+ */ + if ((l4_proto == IPPROTO_UDP) && + ((skb_inner_mac_header(skb) - + skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) + return features & ~(NETIF_F_CSUM_MASK | + NETIF_F_GSO_MASK); + } + + return features; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index aecdd1c5c0ea..3a78c3f25157 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ #include <linux/module.h> #include <linux/pci.h> #include <linux/version.h> @@ -36,8 +59,10 @@ #include <linux/random.h> #include <net/ip6_checksum.h> #include <linux/bitops.h> +#include <linux/vmalloc.h> #include <linux/qed/qede_roce.h> #include "qede.h" +#include "qede_ptp.h" static char version[] = "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; @@ -154,8 +179,12 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) { struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); struct qed_dev_info *qed_info = &edev->dev_info.common; + struct qed_update_vport_params *vport_params; int rc; + vport_params = vzalloc(sizeof(*vport_params)); + if (!vport_params) + return -ENOMEM; DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); @@ -163,15 +192,13 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) /* Enable/Disable Tx switching for PF */ if ((rc == num_vfs_param) && netif_running(edev->ndev) && qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { - struct qed_update_vport_params params; - - memset(¶ms, 0, sizeof(params)); - params.vport_id = 0; - params.update_tx_switching_flg = 1; - params.tx_switching_flg = num_vfs_param ? 
1 : 0; - edev->ops->vport_update(edev->cdev, ¶ms); + vport_params->vport_id = 0; + vport_params->update_tx_switching_flg = 1; + vport_params->tx_switching_flg = num_vfs_param ? 1 : 0; + edev->ops->vport_update(edev->cdev, vport_params); } + vfree(vport_params); return rc; } #endif @@ -187,18 +214,6 @@ static struct pci_driver qede_pci_driver = { #endif }; -static void qede_force_mac(void *dev, u8 *mac, bool forced) -{ - struct qede_dev *edev = dev; - - /* MAC hints take effect only if we haven't set one already */ - if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) - return; - - ether_addr_copy(edev->ndev->dev_addr, mac); - ether_addr_copy(edev->primary_mac, mac); -} - static struct qed_eth_cb_ops qede_ll_ops = { { .link_update = qede_link_update, @@ -294,1643 +309,8 @@ static void __exit qede_cleanup(void) module_init(qede_init); module_exit(qede_cleanup); -/* ------------------------------------------------------------------------- - * START OF FAST-PATH - * ------------------------------------------------------------------------- - */ - -/* Unmap the data and free skb */ -static int qede_free_tx_pkt(struct qede_dev *edev, - struct qede_tx_queue *txq, int *len) -{ - u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; - struct eth_tx_1st_bd *first_bd; - struct eth_tx_bd *tx_data_bd; - int bds_consumed = 0; - int nbds; - bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; - int i, split_bd_len = 0; - - if (unlikely(!skb)) { - DP_ERR(edev, - "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", - idx, txq->sw_tx_cons, txq->sw_tx_prod); - return -1; - } - - *len = skb->len; - - first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - - bds_consumed++; - - nbds = first_bd->data.nbds; - - if (data_split) { - struct eth_tx_bd *split = (struct eth_tx_bd *) - qed_chain_consume(&txq->tx_pbl); - split_bd_len = BD_UNMAP_LEN(split); - bds_consumed++; - } - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); - - /* Unmap the data of the skb frags */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_consume(&txq->tx_pbl); - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - } - - while (bds_consumed++ < nbds) - qed_chain_consume(&txq->tx_pbl); - - /* Free skb */ - dev_kfree_skb_any(skb); - txq->sw_tx_ring.skbs[idx].skb = NULL; - txq->sw_tx_ring.skbs[idx].flags = 0; - - return 0; -} - -/* Unmap the data and free skb when mapping failed during start_xmit */ -static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, - struct eth_tx_1st_bd *first_bd, - int nbd, bool data_split) -{ - u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; - struct eth_tx_bd *tx_data_bd; - int i, split_bd_len = 0; - - /* Return prod to its position before this skb was handled */ - qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); - - first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); - - if (data_split) { - struct eth_tx_bd *split = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - split_bd_len = BD_UNMAP_LEN(split); - nbd--; - } - - dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); - - /* Unmap the data of the skb frags */ - for (i = 0; i < nbd; i++) { - tx_data_bd = (struct 
eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - if (tx_data_bd->nbytes) - dma_unmap_page(txq->dev, - BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - } - - /* Return again prod to its position before this skb was handled */ - qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); - - /* Free skb */ - dev_kfree_skb_any(skb); - txq->sw_tx_ring.skbs[idx].skb = NULL; - txq->sw_tx_ring.skbs[idx].flags = 0; -} - -static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) -{ - u32 rc = XMIT_L4_CSUM; - __be16 l3_proto; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return XMIT_PLAIN; - - l3_proto = vlan_get_protocol(skb); - if (l3_proto == htons(ETH_P_IPV6) && - (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) - *ipv6_ext = 1; - - if (skb->encapsulation) { - rc |= XMIT_ENC; - if (skb_is_gso(skb)) { - unsigned short gso_type = skb_shinfo(skb)->gso_type; - - if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || - (gso_type & SKB_GSO_GRE_CSUM)) - rc |= XMIT_ENC_GSO_L4_CSUM; - - rc |= XMIT_LSO; - return rc; - } - } - - if (skb_is_gso(skb)) - rc |= XMIT_LSO; - - return rc; -} - -static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, - struct eth_tx_2nd_bd *second_bd, - struct eth_tx_3rd_bd *third_bd) -{ - u8 l4_proto; - u16 bd2_bits1 = 0, bd2_bits2 = 0; - - bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); - - bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & - ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) - << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; - - bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << - ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); - - if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) - l4_proto = ipv6_hdr(skb)->nexthdr; - else - l4_proto = ip_hdr(skb)->protocol; - - if (l4_proto == IPPROTO_UDP) - bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; - - if (third_bd) - third_bd->data.bitfields |= - cpu_to_le16(((tcp_hdrlen(skb) / 4) & - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); - - second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); - second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); -} - -static int map_frag_to_bd(struct qede_tx_queue *txq, - skb_frag_t *frag, struct eth_tx_bd *bd) -{ - dma_addr_t mapping; - - /* Map skb non-linear frag data for DMA */ - mapping = skb_frag_dma_map(txq->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(txq->dev, mapping))) - return -ENOMEM; - - /* Setup the data pointer of the frag data */ - BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); - - return 0; -} - -static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) -{ - if (is_encap_pkt) - return (skb_inner_transport_header(skb) + - inner_tcp_hdrlen(skb) - skb->data); - else - return (skb_transport_header(skb) + - tcp_hdrlen(skb) - skb->data); -} - -/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ -#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) -static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) -{ - int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; - - if (xmit_type & XMIT_LSO) { - int hlen; - - hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); - - /* linear payload would require its own BD */ - if (skb_headlen(skb) > hlen) - allowed_frags--; - } - - return (skb_shinfo(skb)->nr_frags > allowed_frags); -} -#endif - -static inline void qede_update_tx_producer(struct qede_tx_queue *txq) -{ - /* wmb makes sure that the BDs data is updated before updating the - * 
producer, otherwise FW may read old data from the BDs. - */ - wmb(); - barrier(); - writel(txq->tx_db.raw, txq->doorbell_addr); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); -} - -static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, - struct sw_rx_data *metadata, u16 padding, u16 length) -{ - struct qede_tx_queue *txq = fp->xdp_tx; - u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - struct eth_tx_1st_bd *first_bd; - - if (!qed_chain_get_elem_left(&txq->tx_pbl)) { - txq->stopped_cnt++; - return -ENOMEM; - } - - first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); - - memset(first_bd, 0, sizeof(*first_bd)); - first_bd->data.bd_flags.bitfields = - BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); - first_bd->data.bitfields |= - (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << - ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; - first_bd->data.nbds = 1; - - /* We can safely ignore the offset, as it's 0 for XDP */ - BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); - - /* Synchronize the buffer back to device, as program [probably] - * has changed it. - */ - dma_sync_single_for_device(&edev->pdev->dev, - metadata->mapping + padding, - length, PCI_DMA_TODEVICE); - - txq->sw_tx_ring.pages[idx] = metadata->data; - txq->sw_tx_prod++; - - /* Mark the fastpath for future XDP doorbell */ - fp->xdp_xmit = 1; - - return 0; -} - -/* Main transmit function */ -static netdev_tx_t qede_start_xmit(struct sk_buff *skb, - struct net_device *ndev) -{ - struct qede_dev *edev = netdev_priv(ndev); - struct netdev_queue *netdev_txq; - struct qede_tx_queue *txq; - struct eth_tx_1st_bd *first_bd; - struct eth_tx_2nd_bd *second_bd = NULL; - struct eth_tx_3rd_bd *third_bd = NULL; - struct eth_tx_bd *tx_data_bd = NULL; - u16 txq_index; - u8 nbd = 0; - dma_addr_t mapping; - int rc, frag_idx = 0, ipv6_ext = 0; - u8 xmit_type; - u16 idx; - u16 hlen; - bool data_split = false; - - /* Get tx-queue context and netdev index */ - txq_index = skb_get_queue_mapping(skb); - WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); - txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; - netdev_txq = netdev_get_tx_queue(ndev, txq_index); - - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); - - xmit_type = qede_xmit_type(skb, &ipv6_ext); - -#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) - if (qede_pkt_req_lin(skb, xmit_type)) { - if (skb_linearize(skb)) { - DP_NOTICE(edev, - "SKB linearization failed - silently dropping this SKB\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } -#endif - - /* Fill the entry in the SW ring and the BDs in the FW ring */ - idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - txq->sw_tx_ring.skbs[idx].skb = skb; - first_bd = (struct eth_tx_1st_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(first_bd, 0, sizeof(*first_bd)); - first_bd->data.bd_flags.bitfields = - 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; - - /* Map skb linear data for DMA and set in the first BD */ - mapping = dma_map_single(txq->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(txq->dev, mapping))) { - DP_NOTICE(edev, "SKB mapping failed\n"); - qede_free_failed_tx_pkt(txq, first_bd, 0, false); - qede_update_tx_producer(txq); - return 
NETDEV_TX_OK; - } - nbd++; - BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); - - /* In case there is IPv6 with extension headers or LSO we need 2nd and - * 3rd BDs. - */ - if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { - second_bd = (struct eth_tx_2nd_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(second_bd, 0, sizeof(*second_bd)); - - nbd++; - third_bd = (struct eth_tx_3rd_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(third_bd, 0, sizeof(*third_bd)); - - nbd++; - /* We need to fill in additional data in second_bd... */ - tx_data_bd = (struct eth_tx_bd *)second_bd; - } - - if (skb_vlan_tag_present(skb)) { - first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; - } - - /* Fill the parsing flags & params according to the requested offload */ - if (xmit_type & XMIT_L4_CSUM) { - /* We don't re-calculate IP checksum as it is already done by - * the upper stack - */ - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; - - if (xmit_type & XMIT_ENC) { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - first_bd->data.bitfields |= - 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; - } - - /* Legacy FW had flipped behavior in regard to this bit - - * I.e., needed to set to prevent FW from touching encapsulated - * packets when it didn't need to. - */ - if (unlikely(txq->is_legacy)) - first_bd->data.bitfields ^= - 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; - - /* If the packet is IPv6 with extension header, indicate that - * to FW and pass few params, since the device cracker doesn't - * support parsing IPv6 with extension header/s. - */ - if (unlikely(ipv6_ext)) - qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); - } - - if (xmit_type & XMIT_LSO) { - first_bd->data.bd_flags.bitfields |= - (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); - third_bd->data.lso_mss = - cpu_to_le16(skb_shinfo(skb)->gso_size); - - if (unlikely(xmit_type & XMIT_ENC)) { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; - - if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { - u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; - - first_bd->data.bd_flags.bitfields |= 1 << tmp; - } - hlen = qede_get_skb_hlen(skb, true); - } else { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - hlen = qede_get_skb_hlen(skb, false); - } - - /* @@@TBD - if will not be removed need to check */ - third_bd->data.bitfields |= - cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT)); - - /* Make life easier for FW guys who can't deal with header and - * data on same BD. If we need to split, use the second bd... - */ - if (unlikely(skb_headlen(skb) > hlen)) { - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "TSO split header size is %d (%x:%x)\n", - first_bd->nbytes, first_bd->addr.hi, - first_bd->addr.lo); - - mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), - le32_to_cpu(first_bd->addr.lo)) + - hlen; - - BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, - le16_to_cpu(first_bd->nbytes) - - hlen); - - /* this marks the BD as one that has no - * individual mapping - */ - txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; - - first_bd->nbytes = cpu_to_le16(hlen); - - tx_data_bd = (struct eth_tx_bd *)third_bd; - data_split = true; - } - } else { - first_bd->data.bitfields |= - (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << - ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; - } - - /* Handle fragmented skb */ - /* special handle for frags inside 2nd and 3rd bds.. 
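
qede_pkt_req_lin(), removed a little earlier as part of the fast-path split, encodes the firmware limit that a non-LSO packet may use at most ETH_TX_MAX_BDS_PER_NON_LSO_PACKET buffer descriptors; an skb with more fragments than that budget allows (minus the header BD, and minus one more when a TSO packet carries payload in its linear area) is linearized before transmit. A user-space model of the decision, with an illustrative limit:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BDS_PER_NON_LSO_PKT 8   /* illustrative; the real value comes from the FW interface */

/* Returns true when the skb would need skb_linearize() before transmit. */
static bool pkt_needs_linearize(int nr_frags, int headlen, int hdr_len, bool is_lso)
{
    int allowed_frags = MAX_BDS_PER_NON_LSO_PKT - 1;    /* first BD carries the headers */

    /* For TSO, linear payload beyond the headers needs its own data BD. */
    if (is_lso && headlen > hdr_len)
        allowed_frags--;

    return nr_frags > allowed_frags;
}

int main(void)
{
    printf("%d\n", pkt_needs_linearize(9, 128, 66, false)); /* 1: too many frags */
    printf("%d\n", pkt_needs_linearize(7, 200, 66, true));  /* 1: header split consumes a BD */
    printf("%d\n", pkt_needs_linearize(6, 66, 66, true));   /* 0: fits */
    return 0;
}
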
*/ - while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { - rc = map_frag_to_bd(txq, - &skb_shinfo(skb)->frags[frag_idx], - tx_data_bd); - if (rc) { - qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - - if (tx_data_bd == (struct eth_tx_bd *)second_bd) - tx_data_bd = (struct eth_tx_bd *)third_bd; - else - tx_data_bd = NULL; - - frag_idx++; - } - - /* map last frags into 4th, 5th .... */ - for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - - memset(tx_data_bd, 0, sizeof(*tx_data_bd)); - - rc = map_frag_to_bd(txq, - &skb_shinfo(skb)->frags[frag_idx], - tx_data_bd); - if (rc) { - qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - } - - /* update the first BD with the actual num BDs */ - first_bd->data.nbds = nbd; - - netdev_tx_sent_queue(netdev_txq, skb->len); - - skb_tx_timestamp(skb); - - /* Advance packet producer only before sending the packet since mapping - * of pages may fail. - */ - txq->sw_tx_prod++; - - /* 'next page' entries are counted in the producer value */ - txq->tx_db.data.bd_prod = - cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); - - if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) - qede_update_tx_producer(txq); - - if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) - < (MAX_SKB_FRAGS + 1))) { - if (skb->xmit_more) - qede_update_tx_producer(txq); - - netif_tx_stop_queue(netdev_txq); - txq->stopped_cnt++; - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "Stop queue was called\n"); - /* paired memory barrier is in qede_tx_int(), we have to keep - * ordering of set_bit() in netif_tx_stop_queue() and read of - * fp->bd_tx_cons - */ - smp_mb(); - - if (qed_chain_get_elem_left(&txq->tx_pbl) - >= (MAX_SKB_FRAGS + 1) && - (edev->state == QEDE_STATE_OPEN)) { - netif_tx_wake_queue(netdev_txq); - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "Wake queue was called\n"); - } - } - - return NETDEV_TX_OK; -} - -int qede_txq_has_work(struct qede_tx_queue *txq) -{ - u16 hw_bd_cons; - - /* Tell compiler that consumer and producer can change */ - barrier(); - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) - return 0; - - return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); -} - -static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) -{ - struct eth_tx_1st_bd *bd; - u16 hw_bd_cons; - - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - barrier(); - - while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { - bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), - PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & - NUM_TX_BDS_MAX]); - - txq->sw_tx_cons++; - txq->xmit_pkts++; - } -} - -static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) -{ - struct netdev_queue *netdev_txq; - u16 hw_bd_cons; - unsigned int pkts_compl = 0, bytes_compl = 0; - int rc; - - netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); - - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - barrier(); - - while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { - int len = 0; - - rc = qede_free_tx_pkt(edev, txq, &len); - if (rc) { - DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", - hw_bd_cons, - qed_chain_get_cons_idx(&txq->tx_pbl)); - break; - } - - bytes_compl += len; - pkts_compl++; - 
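
The transmit path above stops the queue once fewer than MAX_SKB_FRAGS + 1 descriptors are free, i.e. when a worst-case fragmented skb might no longer fit, and qede_tx_int() wakes it again (under the tx lock, after the paired smp_mb()) once that headroom returns. A toy model of the threshold itself, with arbitrary ring numbers:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE     64   /* illustrative */
#define MAX_SKB_FRAGS 17   /* common Linux value, used here only for the arithmetic */

/* Producer/consumer are free-running counters; their difference is the fill level. */
static int elems_left(unsigned prod, unsigned cons)
{
    return RING_SIZE - (int)(prod - cons);
}

int main(void)
{
    unsigned prod = 120, cons = 72;   /* 48 BDs in flight */

    printf("free=%d stop=%d\n", elems_left(prod, cons),
           elems_left(prod, cons) < MAX_SKB_FRAGS + 1);
    cons += 10;                       /* completions processed by qede_tx_int() */
    printf("free=%d wake=%d\n", elems_left(prod, cons),
           elems_left(prod, cons) >= MAX_SKB_FRAGS + 1);
    return 0;
}
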
txq->sw_tx_cons++; - txq->xmit_pkts++; - } - - netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); - - /* Need to make the tx_bd_cons update visible to start_xmit() - * before checking for netif_tx_queue_stopped(). Without the - * memory barrier, there is a small possibility that - * start_xmit() will miss it and cause the queue to be stopped - * forever. - * On the other hand we need an rmb() here to ensure the proper - * ordering of bit testing in the following - * netif_tx_queue_stopped(txq) call. - */ - smp_mb(); - - if (unlikely(netif_tx_queue_stopped(netdev_txq))) { - /* Taking tx_lock is needed to prevent reenabling the queue - * while it's empty. This could have happen if rx_action() gets - * suspended in qede_tx_int() after the condition before - * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): - * - * stops the queue->sees fresh tx_bd_cons->releases the queue-> - * sends some packets consuming the whole queue again-> - * stops the queue - */ - - __netif_tx_lock(netdev_txq, smp_processor_id()); - - if ((netif_tx_queue_stopped(netdev_txq)) && - (edev->state == QEDE_STATE_OPEN) && - (qed_chain_get_elem_left(&txq->tx_pbl) - >= (MAX_SKB_FRAGS + 1))) { - netif_tx_wake_queue(netdev_txq); - DP_VERBOSE(edev, NETIF_MSG_TX_DONE, - "Wake queue was called\n"); - } - - __netif_tx_unlock(netdev_txq); - } - - return 0; -} - -bool qede_has_rx_work(struct qede_rx_queue *rxq) -{ - u16 hw_comp_cons, sw_comp_cons; - - /* Tell compiler that status block fields can change */ - barrier(); - - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - - return hw_comp_cons != sw_comp_cons; -} - -static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) -{ - qed_chain_consume(&rxq->rx_bd_ring); - rxq->sw_rx_cons++; -} - -/* This function reuses the buffer(from an offset) from - * consumer index to producer index in the bd ring - */ -static inline void qede_reuse_page(struct qede_rx_queue *rxq, - struct sw_rx_data *curr_cons) -{ - struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *curr_prod; - dma_addr_t new_mapping; - - curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - *curr_prod = *curr_cons; - - new_mapping = curr_prod->mapping + curr_prod->page_offset; - - rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); - - rxq->sw_rx_prod++; - curr_cons->data = NULL; -} - -/* In case of allocation failures reuse buffers - * from consumer index to produce buffers for firmware - */ -void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) -{ - struct sw_rx_data *curr_cons; - - for (; count > 0; count--) { - curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - qede_reuse_page(rxq, curr_cons); - qede_rx_bd_ring_consume(rxq); - } -} - -static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) -{ - struct sw_rx_data *sw_rx_data; - struct eth_rx_bd *rx_bd; - dma_addr_t mapping; - struct page *data; - - data = alloc_pages(GFP_ATOMIC, 0); - if (unlikely(!data)) - return -ENOMEM; - - /* Map the entire page as it would be used - * for multiple RX buffer segment size mapping. 
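
The receive buffers here are carved out of whole pages: qede_alloc_rx_buffer() maps a full page, and qede_realloc_rx_buffer() just below advances page_offset by rx_buf_seg_size after each completed buffer, re-posting the same page with an extra reference until the offset reaches PAGE_SIZE, at which point the page is unmapped and a fresh one allocated. A small user-space model of that bookkeeping, with illustrative sizes:

#include <stdio.h>

#define PAGE_SIZE_  4096
#define RX_SEG_SIZE 2048   /* illustrative; the driver derives it from the MTU */

struct rx_buf {
    unsigned page_offset;  /* where the next segment starts */
    unsigned page_refs;    /* models page_ref_inc()/put_page() */
};

/* Returns 1 when the page is exhausted and a replacement must be allocated. */
static int consume_segment(struct rx_buf *b)
{
    b->page_offset += RX_SEG_SIZE;
    if (b->page_offset == PAGE_SIZE_)
        return 1;          /* unmap the page, allocate a new one */

    b->page_refs++;        /* keep the page alive while the stack holds a segment */
    return 0;              /* re-post the same page at the new offset */
}

int main(void)
{
    struct rx_buf b = { .page_offset = 0, .page_refs = 1 };

    for (int i = 0; i < 2; i++)
        printf("seg %d -> need_new_page=%d offset=%u refs=%u\n",
               i, consume_segment(&b), b.page_offset, b.page_refs);
    return 0;
}
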
- */ - mapping = dma_map_page(rxq->dev, data, 0, - PAGE_SIZE, rxq->data_direction); - if (unlikely(dma_mapping_error(rxq->dev, mapping))) { - __free_page(data); - return -ENOMEM; - } - - sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - sw_rx_data->page_offset = 0; - sw_rx_data->data = data; - sw_rx_data->mapping = mapping; - - /* Advance PROD and get BD pointer */ - rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); - WARN_ON(!rx_bd); - rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - - rxq->sw_rx_prod++; - - return 0; -} - -static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, - struct sw_rx_data *curr_cons) -{ - /* Move to the next segment in the page */ - curr_cons->page_offset += rxq->rx_buf_seg_size; - - if (curr_cons->page_offset == PAGE_SIZE) { - if (unlikely(qede_alloc_rx_buffer(rxq))) { - /* Since we failed to allocate new buffer - * current buffer can be used again. - */ - curr_cons->page_offset -= rxq->rx_buf_seg_size; - - return -ENOMEM; - } - - dma_unmap_page(rxq->dev, curr_cons->mapping, - PAGE_SIZE, rxq->data_direction); - } else { - /* Increment refcount of the page as we don't want - * network stack to take the ownership of the page - * which can be recycled multiple times by the driver. - */ - page_ref_inc(curr_cons->data); - qede_reuse_page(rxq, curr_cons); - } - - return 0; -} - -void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) -{ - u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); - u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); - struct eth_rx_prod_data rx_prods = {0}; - - /* Update producers */ - rx_prods.bd_prod = cpu_to_le16(bd_prod); - rx_prods.cqe_prod = cpu_to_le16(cqe_prod); - - /* Make sure that the BD and SGE data is updated before updating the - * producers since FW might read the BD/SGE right after the producer - * is updated. - */ - wmb(); - - internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), - (u32 *)&rx_prods); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the napi lock is released and another qede_poll is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); -} - -static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) -{ - enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; - enum rss_hash_type htype; - u32 hash = 0; - - htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); - if (htype) { - hash_type = ((htype == RSS_HASH_TYPE_IPV4) || - (htype == RSS_HASH_TYPE_IPV6)) ? 
- PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; - hash = le32_to_cpu(rss_hash); - } - skb_set_hash(skb, hash, hash_type); -} - -static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) -{ - skb_checksum_none_assert(skb); - - if (csum_flag & QEDE_CSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) - skb->csum_level = 1; -} - -static inline void qede_skb_receive(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq, - struct sk_buff *skb, u16 vlan_tag) -{ - if (vlan_tag) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - - napi_gro_receive(&fp->napi, skb); - fp->rxq->rcv_pkts++; -} - -static void qede_set_gro_params(struct qede_dev *edev, - struct sk_buff *skb, - struct eth_fast_path_rx_tpa_start_cqe *cqe) -{ - u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); - - if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & - PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; - else - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - - skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - - cqe->header_len; -} - -static int qede_fill_frag_skb(struct qede_dev *edev, - struct qede_rx_queue *rxq, - u8 tpa_agg_index, u16 len_on_bd) -{ - struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & - NUM_RX_BDS_MAX]; - struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; - struct sk_buff *skb = tpa_info->skb; - - if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) - goto out; - - /* Add one frag and update the appropriate fields in the skb */ - skb_fill_page_desc(skb, tpa_info->frag_id++, - current_bd->data, current_bd->page_offset, - len_on_bd); - - if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { - /* Incr page ref count to reuse on allocation failure - * so that it doesn't get freed while freeing SKB. - */ - page_ref_inc(current_bd->data); - goto out; - } - - qed_chain_consume(&rxq->rx_bd_ring); - rxq->sw_rx_cons++; - - skb->data_len += len_on_bd; - skb->truesize += rxq->rx_buf_seg_size; - skb->len += len_on_bd; - - return 0; - -out: - tpa_info->state = QEDE_AGG_STATE_ERROR; - qede_recycle_rx_bd_ring(rxq, 1); - - return -ENOMEM; -} - -static void qede_tpa_start(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct eth_fast_path_rx_tpa_start_cqe *cqe) -{ - struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; - struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); - struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *replace_buf = &tpa_info->buffer; - dma_addr_t mapping = tpa_info->buffer_mapping; - struct sw_rx_data *sw_rx_data_cons; - struct sw_rx_data *sw_rx_data_prod; - - sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - - /* Use pre-allocated replacement buffer - we can't release the agg. - * start until its over and we don't want to risk allocation failing - * here, so re-allocate when aggregation will be over. - */ - sw_rx_data_prod->mapping = replace_buf->mapping; - - sw_rx_data_prod->data = replace_buf->data; - rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - sw_rx_data_prod->page_offset = replace_buf->page_offset; - - rxq->sw_rx_prod++; - - /* move partial skb from cons to pool (don't unmap yet) - * save mapping, incase we drop the packet later on. 
- */ - tpa_info->buffer = *sw_rx_data_cons; - mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), - le32_to_cpu(rx_bd_cons->addr.lo)); - - tpa_info->buffer_mapping = mapping; - rxq->sw_rx_cons++; - - /* set tpa state to start only if we are able to allocate skb - * for this aggregation, otherwise mark as error and aggregation will - * be dropped - */ - tpa_info->skb = netdev_alloc_skb(edev->ndev, - le16_to_cpu(cqe->len_on_first_bd)); - if (unlikely(!tpa_info->skb)) { - DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); - tpa_info->state = QEDE_AGG_STATE_ERROR; - goto cons_buf; - } - - /* Start filling in the aggregation info */ - skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); - tpa_info->frag_id = 0; - tpa_info->state = QEDE_AGG_STATE_START; - - /* Store some information from first CQE */ - tpa_info->start_cqe_placement_offset = cqe->placement_offset; - tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); - if ((le16_to_cpu(cqe->pars_flags.flags) >> - PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & - PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) - tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); - else - tpa_info->vlan_tag = 0; - - qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); - - /* This is needed in order to enable forwarding support */ - qede_set_gro_params(edev, tpa_info->skb, cqe); - -cons_buf: /* We still need to handle bd_len_list to consume buffers */ - if (likely(cqe->ext_bd_len_list[0])) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->ext_bd_len_list[0])); - - if (unlikely(cqe->ext_bd_len_list[1])) { - DP_ERR(edev, - "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); - tpa_info->state = QEDE_AGG_STATE_ERROR; - } -} - -#ifdef CONFIG_INET -static void qede_gro_ip_csum(struct sk_buff *skb) -{ - const struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th; - - skb_set_transport_header(skb, sizeof(struct iphdr)); - th = tcp_hdr(skb); - - th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), - iph->saddr, iph->daddr, 0); - - tcp_gro_complete(skb); -} - -static void qede_gro_ipv6_csum(struct sk_buff *skb) -{ - struct ipv6hdr *iph = ipv6_hdr(skb); - struct tcphdr *th; - - skb_set_transport_header(skb, sizeof(struct ipv6hdr)); - th = tcp_hdr(skb); - - th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), - &iph->saddr, &iph->daddr, 0); - tcp_gro_complete(skb); -} -#endif - -static void qede_gro_receive(struct qede_dev *edev, - struct qede_fastpath *fp, - struct sk_buff *skb, - u16 vlan_tag) -{ - /* FW can send a single MTU sized packet from gro flow - * due to aggregation timeout/last segment etc. which - * is not expected to be a gro packet. If a skb has zero - * frags then simply push it in the stack as non gso skb. 
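
The TPA (hardware GRO) path around here aggregates several TCP segments into one skb: the start CQE supplies the header length and first-BD length from which gso_size (the original MSS) is derived, continuation CQEs append page fragments, and the end CQE reports how many segments were coalesced so the stack can resegment the skb later. A quick user-space sketch of that arithmetic, purely illustrative:

#include <stdio.h>

/* gso_size is recovered the same way qede_set_gro_params() does it. */
static unsigned gso_size(unsigned len_on_first_bd, unsigned header_len)
{
    return len_on_first_bd - header_len;
}

/* Roughly how many original segments an aggregated payload corresponds to;
 * the hardware reports the exact count in the TPA end CQE.
 */
static unsigned coalesced_segs(unsigned payload_len, unsigned mss)
{
    return (payload_len + mss - 1) / mss;
}

int main(void)
{
    unsigned mss = gso_size(1514, 66);                            /* 1448-byte MSS */
    printf("mss=%u segs=%u\n", mss, coalesced_segs(20272, mss));  /* 14 segments */
    return 0;
}
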
- */ - if (unlikely(!skb->data_len)) { - skb_shinfo(skb)->gso_type = 0; - skb_shinfo(skb)->gso_size = 0; - goto send_skb; - } - -#ifdef CONFIG_INET - if (skb_shinfo(skb)->gso_size) { - skb_reset_network_header(skb); - - switch (skb->protocol) { - case htons(ETH_P_IP): - qede_gro_ip_csum(skb); - break; - case htons(ETH_P_IPV6): - qede_gro_ipv6_csum(skb); - break; - default: - DP_ERR(edev, - "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", - ntohs(skb->protocol)); - } - } -#endif - -send_skb: - skb_record_rx_queue(skb, fp->rxq->rxq_id); - qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); -} - -static inline void qede_tpa_cont(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct eth_fast_path_rx_tpa_cont_cqe *cqe) -{ - int i; - - for (i = 0; cqe->len_list[i]; i++) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->len_list[i])); - - if (unlikely(i > 1)) - DP_ERR(edev, - "Strange - TPA cont with more than a single len_list entry\n"); -} - -static void qede_tpa_end(struct qede_dev *edev, - struct qede_fastpath *fp, - struct eth_fast_path_rx_tpa_end_cqe *cqe) -{ - struct qede_rx_queue *rxq = fp->rxq; - struct qede_agg_info *tpa_info; - struct sk_buff *skb; - int i; - - tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; - skb = tpa_info->skb; - - for (i = 0; cqe->len_list[i]; i++) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->len_list[i])); - if (unlikely(i > 1)) - DP_ERR(edev, - "Strange - TPA emd with more than a single len_list entry\n"); - - if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) - goto err; - - /* Sanity */ - if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) - DP_ERR(edev, - "Strange - TPA had %02x BDs, but SKB has only %d frags\n", - cqe->num_of_bds, tpa_info->frag_id); - if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) - DP_ERR(edev, - "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", - le16_to_cpu(cqe->total_packet_len), skb->len); - - memcpy(skb->data, - page_address(tpa_info->buffer.data) + - tpa_info->start_cqe_placement_offset + - tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); - - /* Finalize the SKB */ - skb->protocol = eth_type_trans(skb, edev->ndev); - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count - * to skb_shinfo(skb)->gso_segs - */ - NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); - - qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); - - tpa_info->state = QEDE_AGG_STATE_NONE; - - return; -err: - tpa_info->state = QEDE_AGG_STATE_NONE; - dev_kfree_skb_any(tpa_info->skb); - tpa_info->skb = NULL; -} - -static bool qede_tunn_exist(u16 flag) -{ - return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << - PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); -} - -static u8 qede_check_tunn_csum(u16 flag) -{ - u16 csum_flag = 0; - u8 tcsum = 0; - - if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) - csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; - - if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { - csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; - tcsum = QEDE_TUNN_CSUM_UNNECESSARY; - } - - csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << - PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | - PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << - 
PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; - - if (csum_flag & flag) - return QEDE_CSUM_ERROR; - - return QEDE_CSUM_UNNECESSARY | tcsum; -} - -static u8 qede_check_notunn_csum(u16 flag) -{ - u16 csum_flag = 0; - u8 csum = 0; - - if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { - csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; - csum = QEDE_CSUM_UNNECESSARY; - } - - csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << - PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; - - if (csum_flag & flag) - return QEDE_CSUM_ERROR; - - return csum; -} - -static u8 qede_check_csum(u16 flag) -{ - if (!qede_tunn_exist(flag)) - return qede_check_notunn_csum(flag); - else - return qede_check_tunn_csum(flag); -} - -static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, - u16 flag) -{ - u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; - - if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK << - ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) || - (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << - PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT))) - return true; - - return false; -} - -/* Return true iff packet is to be passed to stack */ -static bool qede_rx_xdp(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq, - struct bpf_prog *prog, - struct sw_rx_data *bd, - struct eth_fast_path_rx_reg_cqe *cqe) -{ - u16 len = le16_to_cpu(cqe->len_on_first_bd); - struct xdp_buff xdp; - enum xdp_action act; - - xdp.data = page_address(bd->data) + cqe->placement_offset; - xdp.data_end = xdp.data + len; - - /* Queues always have a full reset currently, so for the time - * being until there's atomic program replace just mark read - * side for map helpers. - */ - rcu_read_lock(); - act = bpf_prog_run_xdp(prog, &xdp); - rcu_read_unlock(); - - if (act == XDP_PASS) - return true; - - /* Count number of packets not to be passed to stack */ - rxq->xdp_no_pass++; - - switch (act) { - case XDP_TX: - /* We need the replacement buffer before transmit. */ - if (qede_alloc_rx_buffer(rxq)) { - qede_recycle_rx_bd_ring(rxq, 1); - return false; - } - - /* Now if there's a transmission problem, we'd still have to - * throw current buffer, as replacement was already allocated. - */ - if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) { - dma_unmap_page(rxq->dev, bd->mapping, - PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(bd->data); - } - - /* Regardless, we've consumed an Rx BD */ - qede_rx_bd_ring_consume(rxq); - return false; - - default: - bpf_warn_invalid_xdp_action(act); - case XDP_ABORTED: - case XDP_DROP: - qede_recycle_rx_bd_ring(rxq, cqe->bd_num); - } - - return false; -} - -static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct sw_rx_data *bd, u16 len, - u16 pad) -{ - unsigned int offset = bd->page_offset; - struct skb_frag_struct *frag; - struct page *page = bd->data; - unsigned int pull_len; - struct sk_buff *skb; - unsigned char *va; - - /* Allocate a new SKB with a sufficient large header len */ - skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - /* Copy data into SKB - if it's small, we can simply copy it and - * re-use the already allcoated & mapped memory. 
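
qede_rx_allocate_skb() here implements the copybreak optimization its comment describes: frames no longer than edev->rx_copybreak are memcpy'd into a freshly allocated skb so the DMA page can be re-posted immediately, while larger frames attach the page as a fragment and only the headers are pulled into the linear area. The decision itself, as a tiny user-space model (the threshold value is illustrative; the driver defaults it to its RX header size):

#include <stdbool.h>
#include <stdio.h>

#define RX_COPYBREAK 256   /* illustrative threshold */

/* true  -> copy the frame and recycle the receive page right away
 * false -> attach the page as an skb fragment and pull only the headers
 */
static bool should_copy(unsigned len, unsigned pad)
{
    return len + pad <= RX_COPYBREAK;
}

int main(void)
{
    printf("%d\n", should_copy(120, 2));    /* 1: small frame, copy it */
    printf("%d\n", should_copy(1500, 2));   /* 0: attach the page as a frag */
    return 0;
}
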
- */ - if (len + pad <= edev->rx_copybreak) { - memcpy(skb_put(skb, len), - page_address(page) + pad + offset, len); - qede_reuse_page(rxq, bd); - goto out; - } - - frag = &skb_shinfo(skb)->frags[0]; - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, pad + offset, len, rxq->rx_buf_seg_size); - - va = skb_frag_address(frag); - pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); - - /* Align the pull_len to optimize memcpy */ - memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); - - /* Correct the skb & frag sizes offset after the pull */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { - /* Incr page ref count to reuse on allocation failure so - * that it doesn't get freed while freeing SKB [as its - * already mapped there]. - */ - page_ref_inc(page); - dev_kfree_skb_any(skb); - return NULL; - } - -out: - /* We've consumed the first BD and prepared an SKB */ - qede_rx_bd_ring_consume(rxq); - return skb; -} - -static int qede_rx_build_jumbo(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct sk_buff *skb, - struct eth_fast_path_rx_reg_cqe *cqe, - u16 first_bd_len) -{ - u16 pkt_len = le16_to_cpu(cqe->pkt_len); - struct sw_rx_data *bd; - u16 bd_cons_idx; - u8 num_frags; - - pkt_len -= first_bd_len; - - /* We've already used one BD for the SKB. Now take care of the rest */ - for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { - u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : - pkt_len; - - if (unlikely(!cur_size)) { - DP_ERR(edev, - "Still got %d BDs for mapping jumbo, but length became 0\n", - num_frags); - goto out; - } - - /* We need a replacement buffer for each BD */ - if (unlikely(qede_alloc_rx_buffer(rxq))) - goto out; - - /* Now that we've allocated the replacement buffer, - * we can safely consume the next BD and map it to the SKB. 
- */ - bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - bd = &rxq->sw_rx_ring[bd_cons_idx]; - qede_rx_bd_ring_consume(rxq); - - dma_unmap_page(rxq->dev, bd->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); - - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, - bd->data, 0, cur_size); - - skb->truesize += PAGE_SIZE; - skb->data_len += cur_size; - skb->len += cur_size; - pkt_len -= cur_size; - } - - if (unlikely(pkt_len)) - DP_ERR(edev, - "Mapped all BDs of jumbo, but still have %d bytes\n", - pkt_len); - -out: - return num_frags; -} - -static int qede_rx_process_tpa_cqe(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq, - union eth_rx_cqe *cqe, - enum eth_rx_cqe_type type) -{ - switch (type) { - case ETH_RX_CQE_TYPE_TPA_START: - qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); - return 0; - case ETH_RX_CQE_TYPE_TPA_CONT: - qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); - return 0; - case ETH_RX_CQE_TYPE_TPA_END: - qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); - return 1; - default: - return 0; - } -} - -static int qede_rx_process_cqe(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq) -{ - struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); - struct eth_fast_path_rx_reg_cqe *fp_cqe; - u16 len, pad, bd_cons_idx, parse_flag; - enum eth_rx_cqe_type cqe_type; - union eth_rx_cqe *cqe; - struct sw_rx_data *bd; - struct sk_buff *skb; - __le16 flags; - u8 csum_flag; - - /* Get the CQE from the completion ring */ - cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); - cqe_type = cqe->fast_path_regular.type; - - /* Process an unlikely slowpath event */ - if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { - struct eth_slow_path_rx_cqe *sp_cqe; - - sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; - edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); - return 0; - } - - /* Handle TPA cqes */ - if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) - return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); - - /* Get the data from the SW ring; Consume it only after it's evident - * we wouldn't recycle it. - */ - bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - bd = &rxq->sw_rx_ring[bd_cons_idx]; - - fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->len_on_first_bd); - pad = fp_cqe->placement_offset; - - /* Run eBPF program if one is attached */ - if (xdp_prog) - if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) - return 1; - - /* If this is an error packet then drop it */ - flags = cqe->fast_path_regular.pars_flags.flags; - parse_flag = le16_to_cpu(flags); - - csum_flag = qede_check_csum(parse_flag); - if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { - if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { - rxq->rx_ip_frags++; - } else { - DP_NOTICE(edev, - "CQE has error, flags = %x, dropping incoming packet\n", - parse_flag); - rxq->rx_hw_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } - } - - /* Basic validation passed; Need to prepare an SKB. This would also - * guarantee to finally consume the first BD upon success. - */ - skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); - if (!skb) { - rxq->rx_alloc_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } - - /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed - * by a single cqe. 
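
qede_rx_build_jumbo() above walks the extra buffer descriptors of a frame that did not fit into a single receive buffer, mapping min(remaining, rx_buf_size) bytes per BD, so the number of additional BDs is a simple ceiling division. A quick model of that arithmetic with illustrative sizes:

#include <stdio.h>

/* Extra BDs needed beyond the first one; sizes in bytes. */
static unsigned extra_rx_bds(unsigned pkt_len, unsigned first_bd_len, unsigned rx_buf_size)
{
    unsigned rest = pkt_len > first_bd_len ? pkt_len - first_bd_len : 0;

    return (rest + rx_buf_size - 1) / rx_buf_size;   /* ceil(rest / rx_buf_size) */
}

int main(void)
{
    /* 9000-byte jumbo frame, 2048-byte first buffer, 4096-byte follow-up buffers */
    printf("%u\n", extra_rx_bds(9000, 2048, 4096));   /* 2 */
    printf("%u\n", extra_rx_bds(1500, 2048, 4096));   /* 0: fits in the first BD */
    return 0;
}
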
- */ - if (fp_cqe->bd_num > 1) { - u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, - fp_cqe, len); - - if (unlikely(unmapped_frags > 0)) { - qede_recycle_rx_bd_ring(rxq, unmapped_frags); - dev_kfree_skb_any(skb); - return 0; - } - } - - /* The SKB contains all the data. Now prepare meta-magic */ - skb->protocol = eth_type_trans(skb, edev->ndev); - qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); - qede_set_skb_csum(skb, csum_flag); - skb_record_rx_queue(skb, rxq->rxq_id); - - /* SKB is prepared - pass it to stack */ - qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); - - return 1; -} - -static int qede_rx_int(struct qede_fastpath *fp, int budget) -{ - struct qede_rx_queue *rxq = fp->rxq; - struct qede_dev *edev = fp->edev; - u16 hw_comp_cons, sw_comp_cons; - int work_done = 0; - - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - - /* Memory barrier to prevent the CPU from doing speculative reads of CQE - * / BD in the while-loop before reading hw_comp_cons. If the CQE is - * read before it is written by FW, then FW writes CQE and SB, and then - * the CPU reads the hw_comp_cons, it will use an old CQE. - */ - rmb(); - - /* Loop to complete all indicated BDs */ - while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { - qede_rx_process_cqe(edev, fp, rxq); - qed_chain_recycle_consumed(&rxq->rx_comp_ring); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - work_done++; - } - - /* Update producers */ - qede_update_rx_prod(edev, rxq); - - return work_done; -} - -static bool qede_poll_is_more_work(struct qede_fastpath *fp) -{ - qed_sb_update_sb_idx(fp->sb_info); - - /* *_has_*_work() reads the status block, thus we need to ensure that - * status block indices have been actually read (qed_sb_update_sb_idx) - * prior to this check (*_has_*_work) so that we won't write the - * "newer" value of the status block to HW (if there was a DMA right - * after qede_has_rx_work and if there is no rmb, the memory reading - * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). - * In this case there will never be another interrupt until there is - * another update of the status block, while there is still unhandled - * work. - */ - rmb(); - - if (likely(fp->type & QEDE_FASTPATH_RX)) - if (qede_has_rx_work(fp->rxq)) - return true; - - if (fp->type & QEDE_FASTPATH_XDP) - if (qede_txq_has_work(fp->xdp_tx)) - return true; - - if (likely(fp->type & QEDE_FASTPATH_TX)) - if (qede_txq_has_work(fp->txq)) - return true; - - return false; -} - -static int qede_poll(struct napi_struct *napi, int budget) -{ - struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, - napi); - struct qede_dev *edev = fp->edev; - int rx_work_done = 0; - - if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) - qede_tx_int(edev, fp->txq); - - if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) - qede_xdp_tx_int(edev, fp->xdp_tx); - - rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && - qede_has_rx_work(fp->rxq)) ? 
- qede_rx_int(fp, budget) : 0; - if (rx_work_done < budget) { - if (!qede_poll_is_more_work(fp)) { - napi_complete(napi); - - /* Update and reenable interrupts */ - qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); - } else { - rx_work_done = budget; - } - } - - if (fp->xdp_xmit) { - u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); - - fp->xdp_xmit = 0; - fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); - qede_update_tx_producer(fp->xdp_tx); - } - - return rx_work_done; -} - -static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) -{ - struct qede_fastpath *fp = fp_cookie; - - qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); - - napi_schedule_irqoff(&fp->napi); - return IRQ_HANDLED; -} - -/* ------------------------------------------------------------------------- - * END OF FAST-PATH - * ------------------------------------------------------------------------- - */ - static int qede_open(struct net_device *ndev); static int qede_close(struct net_device *ndev); -static int qede_set_mac_addr(struct net_device *ndev, void *p); -static void qede_set_rx_mode(struct net_device *ndev); -static void qede_config_rx_mode(struct net_device *ndev); - -static int qede_set_ucast_rx_mac(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - unsigned char mac[ETH_ALEN]) -{ - struct qed_filter_params filter_cmd; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.mac_valid = 1; - ether_addr_copy(filter_cmd.filter.ucast.mac, mac); - - return edev->ops->filter_config(edev->cdev, &filter_cmd); -} - -static int qede_set_ucast_rx_vlan(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - u16 vid) -{ - struct qed_filter_params filter_cmd; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.vlan_valid = 1; - filter_cmd.filter.ucast.vlan = vid; - - return edev->ops->filter_config(edev->cdev, &filter_cmd); -} void qede_fill_by_demand_stats(struct qede_dev *edev) { @@ -2019,9 +399,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; } -static -struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void qede_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); @@ -2051,8 +430,6 @@ struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, stats->collisions = edev->stats.tx_total_collisions; stats->rx_crc_errors = edev->stats.rx_crc_errors; stats->rx_frame_errors = edev->stats.rx_align_errors; - - return stats; } #ifdef CONFIG_QED_SRIOV @@ -2096,445 +473,37 @@ static int qede_set_vf_link_state(struct net_device *dev, int vfidx, return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); } -#endif -static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) -{ - struct qed_update_vport_params params; - int rc; - - /* Proceed only if action actually needs to be performed */ - if (edev->accept_any_vlan == action) - return; - - memset(¶ms, 0, sizeof(params)); - - params.vport_id = 0; - params.accept_any_vlan = action; - params.update_accept_any_vlan_flg = 1; - - rc = edev->ops->vport_update(edev->cdev, ¶ms); - if (rc) { - DP_ERR(edev, "Failed to %s accept-any-vlan\n", - action ? 
"enable" : "disable"); - } else { - DP_INFO(edev, "%s accept-any-vlan\n", - action ? "enabled" : "disabled"); - edev->accept_any_vlan = action; - } -} - -static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) { struct qede_dev *edev = netdev_priv(dev); - struct qede_vlan *vlan, *tmp; - int rc = 0; - - DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); - - vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); - if (!vlan) { - DP_INFO(edev, "Failed to allocate struct for vlan\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&vlan->list); - vlan->vid = vid; - vlan->configured = false; - - /* Verify vlan isn't already configured */ - list_for_each_entry(tmp, &edev->vlan_list, list) { - if (tmp->vid == vlan->vid) { - DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "vlan already configured\n"); - kfree(vlan); - return -EEXIST; - } - } - - /* If interface is down, cache this VLAN ID and return */ - __qede_lock(edev); - if (edev->state != QEDE_STATE_OPEN) { - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "Interface is down, VLAN %d will be configured when interface is up\n", - vid); - if (vid != 0) - edev->non_configured_vlans++; - list_add(&vlan->list, &edev->vlan_list); - goto out; - } - - /* Check for the filter limit. - * Note - vlan0 has a reserved filter and can be added without - * worrying about quota - */ - if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || - (vlan->vid == 0)) { - rc = qede_set_ucast_rx_vlan(edev, - QED_FILTER_XCAST_TYPE_ADD, - vlan->vid); - if (rc) { - DP_ERR(edev, "Failed to configure VLAN %d\n", - vlan->vid); - kfree(vlan); - goto out; - } - vlan->configured = true; - - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) - edev->configured_vlans++; - } else { - /* Out of quota; Activate accept-any-VLAN mode */ - if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, true); - - edev->non_configured_vlans++; - } - - list_add(&vlan->list, &edev->vlan_list); - -out: - __qede_unlock(edev); - return rc; -} - -static void qede_del_vlan_from_list(struct qede_dev *edev, - struct qede_vlan *vlan) -{ - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) { - if (vlan->configured) - edev->configured_vlans--; - else - edev->non_configured_vlans--; - } - - list_del(&vlan->list); - kfree(vlan); -} - -static int qede_configure_vlan_filters(struct qede_dev *edev) -{ - int rc = 0, real_rc = 0, accept_any_vlan = 0; - struct qed_dev_eth_info *dev_info; - struct qede_vlan *vlan = NULL; - - if (list_empty(&edev->vlan_list)) - return 0; - - dev_info = &edev->dev_info; - - /* Configure non-configured vlans */ - list_for_each_entry(vlan, &edev->vlan_list, list) { - if (vlan->configured) - continue; - - /* We have used all our credits, now enable accept_any_vlan */ - if ((vlan->vid != 0) && - (edev->configured_vlans == dev_info->num_vlan_filters)) { - accept_any_vlan = 1; - continue; - } - - DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); - - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, - vlan->vid); - if (rc) { - DP_ERR(edev, "Failed to configure VLAN %u\n", - vlan->vid); - real_rc = rc; - continue; - } - - vlan->configured = true; - /* vlan0 filter doesn't consume our VLAN filter's quota */ - if (vlan->vid != 0) { - edev->non_configured_vlans--; - edev->configured_vlans++; - } - } - - /* enable accept_any_vlan mode if we have more VLANs than credits, - * or remove accept_any_vlan mode if we've 
actually removed - * a non-configured vlan, and all remaining vlans are truly configured. - */ - - if (accept_any_vlan) - qede_config_accept_any_vlan(edev, true); - else if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, false); - - return real_rc; -} - -static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct qede_dev *edev = netdev_priv(dev); - struct qede_vlan *vlan = NULL; - int rc = 0; - - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); - - /* Find whether entry exists */ - __qede_lock(edev); - list_for_each_entry(vlan, &edev->vlan_list, list) - if (vlan->vid == vid) - break; - - if (!vlan || (vlan->vid != vid)) { - DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "Vlan isn't configured\n"); - goto out; - } - - if (edev->state != QEDE_STATE_OPEN) { - /* As interface is already down, we don't have a VPORT - * instance to remove vlan filter. So just update vlan list - */ - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "Interface is down, removing VLAN from list only\n"); - qede_del_vlan_from_list(edev, vlan); - goto out; - } - - /* Remove vlan */ - if (vlan->configured) { - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, - vid); - if (rc) { - DP_ERR(edev, "Failed to remove VLAN %d\n", vid); - goto out; - } - } - - qede_del_vlan_from_list(edev, vlan); - - /* We have removed a VLAN - try to see if we can - * configure non-configured VLAN from the list. - */ - rc = qede_configure_vlan_filters(edev); - -out: - __qede_unlock(edev); - return rc; -} - -static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) -{ - struct qede_vlan *vlan = NULL; - - if (list_empty(&edev->vlan_list)) - return; - - list_for_each_entry(vlan, &edev->vlan_list, list) { - if (!vlan->configured) - continue; - - vlan->configured = false; - - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) { - edev->non_configured_vlans++; - edev->configured_vlans--; - } - - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "marked vlan %d as non-configured\n", vlan->vid); - } - - edev->accept_any_vlan = false; -} -static void qede_set_features_reload(struct qede_dev *edev, - struct qede_reload_args *args) -{ - edev->ndev->features = args->u.features; -} - -int qede_set_features(struct net_device *dev, netdev_features_t features) -{ - struct qede_dev *edev = netdev_priv(dev); - netdev_features_t changes = features ^ dev->features; - bool need_reload = false; - - /* No action needed if hardware GRO is disabled during driver load */ - if (changes & NETIF_F_GRO) { - if (dev->features & NETIF_F_GRO) - need_reload = !edev->gro_disable; - else - need_reload = edev->gro_disable; - } - - if (need_reload) { - struct qede_reload_args args; - - args.u.features = features; - args.func = &qede_set_features_reload; - - /* Make sure that we definitely need to reload. - * In case of an eBPF attached program, there will be no FW - * aggregations, so no need to actually reload. 
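
The VLAN filtering code being moved out of this file works on a credit scheme: VID 0 owns a reserved filter, every other VID consumes one of the num_vlan_filters hardware slots, and once the slots are exhausted the device falls back to accept-any-VLAN until enough filters are released again. A user-space model of the accounting, with an illustrative quota:

#include <stdbool.h>
#include <stdio.h>

#define NUM_VLAN_FILTERS 2        /* illustrative hardware quota */

struct vlan_state {
    int configured;               /* VIDs holding a hardware filter (VID 0 excluded) */
    int non_configured;           /* VIDs accepted but not programmed into hardware */
    bool accept_any_vlan;
};

static void add_vid(struct vlan_state *s, unsigned vid)
{
    if (vid == 0 || s->configured < NUM_VLAN_FILTERS) {
        if (vid != 0)
            s->configured++;      /* VID 0 never consumes quota */
        return;
    }

    /* Out of filters: fall back to accepting every VLAN tag. */
    s->non_configured++;
    s->accept_any_vlan = true;
}

int main(void)
{
    struct vlan_state s = { 0 };
    unsigned vids[] = { 0, 10, 20, 30 };

    for (unsigned i = 0; i < sizeof(vids) / sizeof(vids[0]); i++) {
        add_vid(&s, vids[i]);
        printf("vid %u -> configured=%d pending=%d accept_any=%d\n",
               vids[i], s.configured, s.non_configured, s.accept_any_vlan);
    }
    return 0;
}
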
- */ - __qede_lock(edev); - if (edev->xdp_prog) - args.func(edev, &args); - else - qede_reload(edev, &args, true); - __qede_unlock(edev); - - return 1; - } - - return 0; -} - -static void qede_udp_tunnel_add(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (edev->vxlan_dst_port) - return; - - edev->vxlan_dst_port = t_port; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", - t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (edev->geneve_dst_port) - return; - - edev->geneve_dst_port = t_port; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", - t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - break; - default: - return; - } + if (!edev->ops) + return -EINVAL; - schedule_delayed_work(&edev->sp_task, 0); + return edev->ops->iov->set_trust(edev->cdev, vfidx, setting); } +#endif -static void qede_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) +static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (t_port != edev->vxlan_dst_port) - return; - edev->vxlan_dst_port = 0; + if (!netif_running(dev)) + return -EAGAIN; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", - t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (t_port != edev->geneve_dst_port) - return; - - edev->geneve_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", - t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - break; + switch (cmd) { + case SIOCSHWTSTAMP: + return qede_ptp_hw_ts(edev, ifr); default: - return; - } - - schedule_delayed_work(&edev->sp_task, 0); -} - -/* 8B udp header + 8B base tunnel header + 32B option length */ -#define QEDE_MAX_TUN_HDR_LEN 48 - -static netdev_features_t qede_features_check(struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features) -{ - if (skb->encapsulation) { - u8 l4_proto = 0; - - switch (vlan_get_protocol(skb)) { - case htons(ETH_P_IP): - l4_proto = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - l4_proto = ipv6_hdr(skb)->nexthdr; - break; - default: - return features; - } - - /* Disable offloads for geneve tunnels, as HW can't parse - * the geneve header which has option length greater than 32B. 
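
qede_features_check() here (and its new copy in qede_fp.c at the top of this section) caps the encapsulation headers the hardware will parse at QEDE_MAX_TUN_HDR_LEN = 48 bytes, that is the 8-byte UDP header, the 8-byte base tunnel header and up to 32 bytes of options named in the comment; anything longer pushes checksum and GSO back to software. The same check as a tiny user-space function, with hard-coded example offsets:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TUN_HDR_LEN 48   /* 8B UDP + 8B base tunnel header + 32B options */

/* Offsets mimic skb_transport_header() and skb_inner_mac_header(). */
static bool offloads_allowed(unsigned transport_off, unsigned inner_mac_off)
{
    return (inner_mac_off - transport_off) <= MAX_TUN_HDR_LEN;
}

int main(void)
{
    /* VXLAN: 8B UDP + 8B VXLAN header = 16B of encapsulation -> offloads stay on */
    printf("%d\n", offloads_allowed(34, 50));
    /* GENEVE with a large option TLV area -> fall back to software csum/GSO */
    printf("%d\n", offloads_allowed(34, 98));
    return 0;
}
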
- */ - if ((l4_proto == IPPROTO_UDP) && - ((skb_inner_mac_header(skb) - - skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) - return features & ~(NETIF_F_CSUM_MASK | - NETIF_F_GSO_MASK); - } - - return features; -} - -static void qede_xdp_reload_func(struct qede_dev *edev, - struct qede_reload_args *args) -{ - struct bpf_prog *old; - - old = xchg(&edev->xdp_prog, args->u.new_prog); - if (old) - bpf_prog_put(old); -} - -static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) -{ - struct qede_reload_args args; - - if (prog && prog->xdp_adjust_head) { - DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); + DP_VERBOSE(edev, QED_MSG_DEBUG, + "default IOCTL cmd 0x%x\n", cmd); return -EOPNOTSUPP; } - /* If we're called, there was already a bpf reference increment */ - args.func = &qede_xdp_reload_func; - args.u.new_prog = prog; - qede_reload(edev, &args, false); - return 0; } -static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) -{ - struct qede_dev *edev = netdev_priv(dev); - - switch (xdp->command) { - case XDP_SETUP_PROG: - return qede_xdp_set(edev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_attached = !!edev->xdp_prog; - return 0; - default: - return -EINVAL; - } -} - static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -2543,9 +512,11 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, + .ndo_do_ioctl = qede_ioctl, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, .ndo_set_vf_vlan = qede_set_vf_vlan, + .ndo_set_vf_trust = qede_set_vf_trust, #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, @@ -2814,7 +785,7 @@ static void qede_update_pf_params(struct qed_dev *cdev) /* 64 rx + 64 tx + 64 XDP */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); - pf_params.eth_pf_params.num_cons = 192; + pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3; qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -2883,6 +854,13 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (rc) goto err3; + /* Prepare the lock prior to the registeration of the netdev, + * as once it's registered we might reach flows requiring it + * [it's even possible to reach a flow needing it directly + * from there, although it's unlikely]. 
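
The probe changes just below act on the comment above: the mutex and delayed work are now initialized before register_netdev(), since a registered netdev can immediately be opened and reach code that needs them, and the new PTP registration gains an err5 label so a failure after registration unwinds it again. The general reverse-order unwinding shape, with purely hypothetical step names:

/* Sketch of probe-style error handling; step_a()/step_b()/step_c() are placeholders. */
int step_a(void);  void undo_step_a(void);
int step_b(void);  void undo_step_b(void);
int step_c(void);

static int example_probe(void)
{
	int rc;

	rc = step_a();		/* everything a registered netdev might immediately need */
	if (rc)
		return rc;

	rc = step_b();		/* e.g. register_netdev() */
	if (rc)
		goto err_a;

	rc = step_c();		/* e.g. PTP clock registration */
	if (rc)
		goto err_b;

	return 0;

err_b:
	undo_step_b();		/* unwind strictly in reverse order of setup */
err_a:
	undo_step_a();
	return rc;
}
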
+ */ + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); + mutex_init(&edev->qede_lock); rc = register_netdev(edev->ndev); if (rc) { DP_NOTICE(edev, "Cannot register net-device\n"); @@ -2891,6 +869,15 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); + /* PTP not supported on VFs */ + if (!is_vf) { + rc = qede_ptp_register_phc(edev); + if (rc) { + DP_NOTICE(edev, "Cannot register PHC\n"); + goto err5; + } + } + edev->ops->register_ops(cdev, &qede_ll_ops, edev); #ifdef CONFIG_DCB @@ -2898,14 +885,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_set_dcbnl_ops(edev->ndev); #endif - INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); - mutex_init(&edev->qede_lock); edev->rx_copybreak = QEDE_RX_HDR_SIZE; DP_INFO(edev, "Ending successfully qede probe\n"); return 0; +err5: + unregister_netdev(edev->ndev); err4: qede_roce_dev_remove(edev); err3: @@ -2957,6 +944,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) unregister_netdev(ndev); + qede_ptp_remove(edev); + qede_roce_dev_remove(edev); edev->ops->common->set_power_state(cdev, PCI_D0); @@ -2967,14 +956,20 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) if (edev->xdp_prog) bpf_prog_put(edev->xdp_prog); - free_netdev(ndev); - /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); if (system_state == SYSTEM_POWER_OFF) return; qed_ops->common->remove(cdev); + /* Since this can happen out-of-sync with other flows, + * don't release the netdevice until after slowpath stop + * has been called to guarantee various other contexts + * [e.g., QED register callbacks] won't break anything when + * accessing the netdevice. 
+ */ + free_netdev(ndev); + dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } @@ -3215,8 +1210,9 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) goto err; /* Allocate buffers for the Rx ring */ + rxq->filled_buffers = 0; for (i = 0; i < rxq->num_rx_buffers; i++) { - rc = qede_alloc_rx_buffer(rxq); + rc = qede_alloc_rx_buffer(rxq, false); if (rc) { DP_ERR(edev, "Rx buffers allocation failed at index %d\n", i); @@ -3564,19 +1560,24 @@ static int qede_stop_txq(struct qede_dev *edev, static int qede_stop_queues(struct qede_dev *edev) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; struct qed_dev *cdev = edev->cdev; struct qede_fastpath *fp; int rc, i; /* Disable the vport */ - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = 0; - vport_update_params.update_vport_active_flg = 1; - vport_update_params.vport_active_flg = 0; - vport_update_params.update_rss_flg = 0; + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) + return -ENOMEM; + + vport_update_params->vport_id = 0; + vport_update_params->update_vport_active_flg = 1; + vport_update_params->vport_active_flg = 0; + vport_update_params->update_rss_flg = 0; + + rc = edev->ops->vport_update(cdev, vport_update_params); + vfree(vport_update_params); - rc = edev->ops->vport_update(cdev, &vport_update_params); if (rc) { DP_ERR(edev, "Failed to update vport\n"); return rc; @@ -3688,11 +1689,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) { int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; - struct qed_update_vport_params vport_update_params; - struct qed_queue_start_common_params q_params; struct qed_dev_info *qed_info = &edev->dev_info.common; + struct qed_update_vport_params *vport_update_params; + struct qed_queue_start_common_params q_params; struct qed_start_vport_params start = {0}; - bool reset_rss_indir = false; int rc, i; if (!edev->num_queues) { @@ -3701,6 +1701,11 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) return -EINVAL; } + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) + return -ENOMEM; + + start.handle_ptp_pkts = !!(edev->ptp); start.gro_enable = !edev->gro_disable; start.mtu = edev->ndev->mtu; start.vport_id = 0; @@ -3712,7 +1717,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) { DP_ERR(edev, "Start V-PORT failed %d\n", rc); - return rc; + goto out; } DP_VERBOSE(edev, NETIF_MSG_IFUP, @@ -3748,7 +1753,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) { DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); - return rc; + goto out; } /* Use the return parameters */ @@ -3764,108 +1769,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (fp->type & QEDE_FASTPATH_XDP) { rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI); if (rc) - return rc; + goto out; fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); if (IS_ERR(fp->rxq->xdp_prog)) { rc = PTR_ERR(fp->rxq->xdp_prog); fp->rxq->xdp_prog = NULL; - return rc; + goto out; } } if (fp->type & QEDE_FASTPATH_TX) { rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0)); if (rc) - return rc; + goto out; } } /* Prepare and send the vport enable */ - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = start.vport_id; - vport_update_params.update_vport_active_flg = 1; - 
vport_update_params.vport_active_flg = 1; + vport_update_params->vport_id = start.vport_id; + vport_update_params->update_vport_active_flg = 1; + vport_update_params->vport_active_flg = 1; if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && qed_info->tx_switching) { - vport_update_params.update_tx_switching_flg = 1; - vport_update_params.tx_switching_flg = 1; + vport_update_params->update_tx_switching_flg = 1; + vport_update_params->tx_switching_flg = 1; } - /* Fill struct with RSS params */ - if (QEDE_RSS_COUNT(edev) > 1) { - vport_update_params.update_rss_flg = 1; + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); - /* Need to validate current RSS config uses valid entries */ - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - if (edev->rss_params.rss_ind_table[i] >= - QEDE_RSS_COUNT(edev)) { - reset_rss_indir = true; - break; - } - } - - if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || - reset_rss_indir) { - u16 val; - - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - u16 indir_val; - - val = QEDE_RSS_COUNT(edev); - indir_val = ethtool_rxfh_indir_default(i, val); - edev->rss_params.rss_ind_table[i] = indir_val; - } - edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; - } - - if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { - netdev_rss_key_fill(edev->rss_params.rss_key, - sizeof(edev->rss_params.rss_key)); - edev->rss_params_inited |= QEDE_RSS_KEY_INITED; - } - - if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { - edev->rss_params.rss_caps = QED_RSS_IPV4 | - QED_RSS_IPV6 | - QED_RSS_IPV4_TCP | - QED_RSS_IPV6_TCP; - edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; - } - - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - } else { - memset(&vport_update_params.rss_params, 0, - sizeof(vport_update_params.rss_params)); - } - - rc = edev->ops->vport_update(cdev, &vport_update_params); - if (rc) { + rc = edev->ops->vport_update(cdev, vport_update_params); + if (rc) DP_ERR(edev, "Update V-PORT failed %d\n", rc); - return rc; - } - - return 0; -} - -static int qede_set_mcast_rx_mac(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - unsigned char *mac, int num_macs) -{ - struct qed_filter_params filter_cmd; - int i; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_MCAST; - filter_cmd.filter.mcast.type = opcode; - filter_cmd.filter.mcast.num = num_macs; - - for (i = 0; i < num_macs; i++, mac += ETH_ALEN) - ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); +out: + vfree(vport_update_params); + return rc; } enum qede_unload_mode { @@ -3886,6 +1827,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_roce_dev_event_close(edev); edev->state = QEDE_STATE_CLOSED; + qede_ptp_stop(edev); + /* Close OS Tx */ netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); @@ -3929,7 +1872,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, bool is_locked) { struct qed_link_params link_params; - struct qed_link_output link_output; int rc; DP_INFO(edev, "Starting qede load\n"); @@ -3981,11 +1923,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); - /* Query whether link is already-up */ - memset(&link_output, 0, sizeof(link_output)); - edev->ops->common->get_link(edev->cdev, &link_output); 
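/* Illustrative sketch (editor's note, not part of the patch above): the
 * qede_stop_queues()/qede_start_queues() hunks move the large
 * qed_update_vport_params structure off the kernel stack with vzalloc()
 * and release it on a single exit path.  A minimal version of that
 * pattern, using hypothetical names (big_params, fill_params, do_update):
 */
#include <linux/vmalloc.h>

struct big_params {
        u8 blob[2048];                  /* too large to live on the stack */
};

int fill_params(void *dev, struct big_params *p);       /* hypothetical */
int do_update(void *dev, struct big_params *p);         /* hypothetical */

static int example_update(void *dev)
{
        struct big_params *p;
        int rc;

        p = vzalloc(sizeof(*p));        /* zeroed, may be vmalloc-backed */
        if (!p)
                return -ENOMEM;

        rc = fill_params(dev, p);
        if (rc)
                goto out;

        rc = do_update(dev, p);
out:
        vfree(p);                       /* freed on every path after allocation */
        return rc;
}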
qede_roce_dev_event_open(edev); - qede_link_update(edev, &link_output); + + qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL)); edev->state = QEDE_STATE_OPEN; @@ -4097,192 +2037,3 @@ static void qede_link_update(void *dev, struct qed_link_output *link) } } } - -static int qede_set_mac_addr(struct net_device *ndev, void *p) -{ - struct qede_dev *edev = netdev_priv(ndev); - struct sockaddr *addr = p; - int rc; - - ASSERT_RTNL(); /* @@@TBD To be removed */ - - DP_INFO(edev, "Set_mac_addr called\n"); - - if (!is_valid_ether_addr(addr->sa_data)) { - DP_NOTICE(edev, "The MAC address is not valid\n"); - return -EFAULT; - } - - if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) { - DP_NOTICE(edev, "qed prevents setting MAC\n"); - return -EINVAL; - } - - ether_addr_copy(ndev->dev_addr, addr->sa_data); - - if (!netif_running(ndev)) { - DP_NOTICE(edev, "The device is currently down\n"); - return 0; - } - - /* Remove the previous primary mac */ - rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, - edev->primary_mac); - if (rc) - return rc; - - edev->ops->common->update_mac(edev->cdev, addr->sa_data); - - /* Add MAC filter according to the new unicast HW MAC address */ - ether_addr_copy(edev->primary_mac, ndev->dev_addr); - return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, - edev->primary_mac); -} - -static int -qede_configure_mcast_filtering(struct net_device *ndev, - enum qed_filter_rx_mode_type *accept_flags) -{ - struct qede_dev *edev = netdev_priv(ndev); - unsigned char *mc_macs, *temp; - struct netdev_hw_addr *ha; - int rc = 0, mc_count; - size_t size; - - size = 64 * ETH_ALEN; - - mc_macs = kzalloc(size, GFP_KERNEL); - if (!mc_macs) { - DP_NOTICE(edev, - "Failed to allocate memory for multicast MACs\n"); - rc = -ENOMEM; - goto exit; - } - - temp = mc_macs; - - /* Remove all previously configured MAC filters */ - rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, - mc_macs, 1); - if (rc) - goto exit; - - netif_addr_lock_bh(ndev); - - mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { - netdev_for_each_mc_addr(ha, ndev) { - ether_addr_copy(temp, ha->addr); - temp += ETH_ALEN; - } - } - - netif_addr_unlock_bh(ndev); - - /* Check for all multicast @@@TBD resource allocation */ - if ((ndev->flags & IFF_ALLMULTI) || - (mc_count > 64)) { - if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) - *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; - } else { - /* Add all multicast MAC filters */ - rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, - mc_macs, mc_count); - } - -exit: - kfree(mc_macs); - return rc; -} - -static void qede_set_rx_mode(struct net_device *ndev) -{ - struct qede_dev *edev = netdev_priv(ndev); - - set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); -} - -/* Must be called with qede_lock held */ -static void qede_config_rx_mode(struct net_device *ndev) -{ - enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST; - struct qede_dev *edev = netdev_priv(ndev); - struct qed_filter_params rx_mode; - unsigned char *uc_macs, *temp; - struct netdev_hw_addr *ha; - int rc, uc_count; - size_t size; - - netif_addr_lock_bh(ndev); - - uc_count = netdev_uc_count(ndev); - size = uc_count * ETH_ALEN; - - uc_macs = kzalloc(size, GFP_ATOMIC); - if (!uc_macs) { - DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); - netif_addr_unlock_bh(ndev); - return; - } - - temp = uc_macs; - netdev_for_each_uc_addr(ha, ndev) { - ether_addr_copy(temp, ha->addr); - temp += ETH_ALEN; - } - - 
netif_addr_unlock_bh(ndev); - - /* Configure the struct for the Rx mode */ - memset(&rx_mode, 0, sizeof(struct qed_filter_params)); - rx_mode.type = QED_FILTER_TYPE_RX_MODE; - - /* Remove all previous unicast secondary macs and multicast macs - * (configrue / leave the primary mac) - */ - rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, - edev->primary_mac); - if (rc) - goto out; - - /* Check for promiscuous */ - if ((ndev->flags & IFF_PROMISC) || - (uc_count > edev->dev_info.num_mac_filters - 1)) { - accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; - } else { - /* Add MAC filters according to the unicast secondary macs */ - int i; - - temp = uc_macs; - for (i = 0; i < uc_count; i++) { - rc = qede_set_ucast_rx_mac(edev, - QED_FILTER_XCAST_TYPE_ADD, - temp); - if (rc) - goto out; - - temp += ETH_ALEN; - } - - rc = qede_configure_mcast_filtering(ndev, &accept_flags); - if (rc) - goto out; - } - - /* take care of VLAN mode */ - if (ndev->flags & IFF_PROMISC) { - qede_config_accept_any_vlan(edev, true); - } else if (!edev->non_configured_vlans) { - /* It's possible that accept_any_vlan mode is set due to a - * previous setting of IFF_PROMISC. If vlan credits are - * sufficient, disable accept_any_vlan. - */ - qede_config_accept_any_vlan(edev, false); - } - - rx_mode.filter.accept_flags = accept_flags; - edev->ops->filter_config(edev->cdev, &rx_mode); -out: - kfree(uc_macs); -} diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c new file mode 100644 index 000000000000..2e62dec09bd7 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -0,0 +1,536 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "qede_ptp.h" + +struct qede_ptp { + const struct qed_eth_ptp_ops *ops; + struct ptp_clock_info clock_info; + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *clock; + struct work_struct work; + struct qede_dev *edev; + struct sk_buff *tx_skb; + + /* ptp spinlock is used for protecting the cycle/time counter fields + * and, also for serializing the qed PTP API invocations. 
+ */ + spinlock_t lock; + bool hw_ts_ioctl_called; + u16 tx_type; + u16 rx_filter; +}; + +/** + * qede_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * Adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ +static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) +{ + struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info); + struct qede_dev *edev = ptp->edev; + int rc; + + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + spin_lock_bh(&ptp->lock); + rc = ptp->ops->adjfreq(edev->cdev, ppb); + spin_unlock_bh(&ptp->lock); + } else { + DP_ERR(edev, "PTP adjfreq called while interface is down\n"); + rc = -EFAULT; + } + __qede_unlock(edev); + + return rc; +} + +static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n", + delta); + + spin_lock_bh(&ptp->lock); + timecounter_adjtime(&ptp->tc, delta); + spin_unlock_bh(&ptp->lock); + + return 0; +} + +static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 ns; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + spin_lock_bh(&ptp->lock); + ns = timecounter_read(&ptp->tc); + spin_unlock_bh(&ptp->lock); + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int qede_ptp_settime(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 ns; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + ns = timespec64_to_ns(ts); + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns); + + /* Re-init the timecounter */ + spin_lock_bh(&ptp->lock); + timecounter_init(&ptp->tc, &ptp->cc, ns); + spin_unlock_bh(&ptp->lock); + + return 0; +} + +/* Enable (or disable) ancillary features of the phc subsystem */ +static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info, + struct ptp_clock_request *rq, + int on) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + + ptp = container_of(info, struct qede_ptp, clock_info); + edev = ptp->edev; + + DP_ERR(edev, "PHC ancillary features are not supported\n"); + + return -ENOTSUPP; +} + +static void qede_ptp_task(struct work_struct *work) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 timestamp, ns; + int rc; + + ptp = container_of(work, struct qede_ptp, work); + edev = ptp->edev; + + /* Read Tx timestamp registers */ + spin_lock_bh(&ptp->lock); + rc = ptp->ops->read_tx_ts(edev->cdev, ×tamp); + spin_unlock_bh(&ptp->lock); + if (rc) { + /* Reschedule to keep checking for a valid timestamp value */ + schedule_work(&ptp->work); + return; + } + + ns = timecounter_cyc2time(&ptp->tc, timestamp); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(ptp->tx_skb, &shhwtstamps); + dev_kfree_skb_any(ptp->tx_skb); + ptp->tx_skb = NULL; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} + +/* Read the PHC. This API is invoked with ptp_lock held. 
*/ +static u64 qede_ptp_read_cc(const struct cyclecounter *cc) +{ + struct qede_dev *edev; + struct qede_ptp *ptp; + u64 phc_cycles; + int rc; + + ptp = container_of(cc, struct qede_ptp, cc); + edev = ptp->edev; + rc = ptp->ops->read_cc(edev->cdev, &phc_cycles); + if (rc) + WARN_ONCE(1, "PHC read err %d\n", rc); + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles); + + return phc_cycles; +} + +static void qede_ptp_init_cc(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (!ptp) + return; + + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; +} + +static int qede_ptp_cfg_filters(struct qede_dev *edev) +{ + struct qede_ptp *ptp = edev->ptp; + + if (!ptp) + return -EIO; + + if (!ptp->hw_ts_ioctl_called) { + DP_INFO(edev, "TS IOCTL not called\n"); + return 0; + } + + switch (ptp->tx_type) { + case HWTSTAMP_TX_ON: + edev->flags |= QEDE_TX_TIMESTAMPING_EN; + ptp->ops->hwtstamp_tx_on(edev->cdev); + break; + + case HWTSTAMP_TX_ONESTEP_SYNC: + DP_ERR(edev, "One-step timestamping is not supported\n"); + return -ERANGE; + } + + spin_lock_bh(&ptp->lock); + switch (ptp->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + ptp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 events */ + ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ + ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6); + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + /* Initialize PTP detection L2 events */ + ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ + ptp->ops->cfg_rx_filters(edev->cdev, + QED_PTP_FILTER_L2_IPV4_IPV6); + break; + } + + spin_unlock_bh(&ptp->lock); + + return 0; +} + +int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct qede_ptp *ptp; + int rc; + + ptp = edev->ptp; + if (!ptp) + return -EIO; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n", + config.tx_type, config.rx_filter); + + if (config.flags) { + DP_ERR(edev, "config.flags is reserved for future use\n"); + return -EINVAL; + } + + ptp->hw_ts_ioctl_called = 1; + ptp->tx_type = config.tx_type; + ptp->rx_filter = config.rx_filter; + + rc = qede_ptp_cfg_filters(edev); + if (rc) + return rc; + + config.rx_filter = ptp->rx_filter; + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? 
-EFAULT : 0; +} + +/* Called during load, to initialize PTP-related stuff */ +static void qede_ptp_init(struct qede_dev *edev, bool init_tc) +{ + struct qede_ptp *ptp; + int rc; + + ptp = edev->ptp; + if (!ptp) + return; + + spin_lock_init(&ptp->lock); + + /* Configure PTP in HW */ + rc = ptp->ops->enable(edev->cdev); + if (rc) { + DP_ERR(edev, "Stopping PTP initialization\n"); + return; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&ptp->work, qede_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. + */ + if (init_tc) { + qede_ptp_init_cc(edev); + timecounter_init(&ptp->tc, &ptp->cc, + ktime_to_ns(ktime_get_real())); + } + + DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n"); +} + +void qede_ptp_start(struct qede_dev *edev, bool init_tc) +{ + qede_ptp_init(edev, init_tc); + qede_ptp_cfg_filters(edev); +} + +void qede_ptp_remove(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (ptp && ptp->clock) { + ptp_clock_unregister(ptp->clock); + ptp->clock = NULL; + } + + kfree(ptp); + edev->ptp = NULL; +} + +int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) +{ + struct qede_ptp *ptp = edev->ptp; + + if (!ptp) + return -EIO; + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (ptp->clock) + info->phc_index = ptp_clock_index(ptp->clock); + else + info->phc_index = -1; + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + return 0; +} + +/* Called during unload, to stop PTP-related stuff */ +void qede_ptp_stop(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (!ptp) + return; + + /* Cancel PTP work queue. Should be done after the Tx queues are + * drained to prevent additional scheduling. 
+ */ + cancel_work_sync(&ptp->work); + if (ptp->tx_skb) { + dev_kfree_skb_any(ptp->tx_skb); + ptp->tx_skb = NULL; + } + + /* Disable PTP in HW */ + spin_lock_bh(&ptp->lock); + ptp->ops->disable(edev->cdev); + spin_unlock_bh(&ptp->lock); +} + +int qede_ptp_register_phc(struct qede_dev *edev) +{ + struct qede_ptp *ptp; + + ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); + if (!ptp) { + DP_INFO(edev, "Failed to allocate struct for PTP\n"); + return -ENOMEM; + } + + ptp->edev = edev; + ptp->ops = edev->ops->ptp; + if (!ptp->ops) { + kfree(ptp); + edev->ptp = NULL; + DP_ERR(edev, "PTP clock registeration failed\n"); + return -EIO; + } + + edev->ptp = ptp; + + /* Fill the ptp_clock_info struct and register PTP clock */ + ptp->clock_info.owner = THIS_MODULE; + snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name); + ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB; + ptp->clock_info.n_alarm = 0; + ptp->clock_info.n_ext_ts = 0; + ptp->clock_info.n_per_out = 0; + ptp->clock_info.pps = 0; + ptp->clock_info.adjfreq = qede_ptp_adjfreq; + ptp->clock_info.adjtime = qede_ptp_adjtime; + ptp->clock_info.gettime64 = qede_ptp_gettime; + ptp->clock_info.settime64 = qede_ptp_settime; + ptp->clock_info.enable = qede_ptp_ancillary_feature_enable; + + ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); + if (IS_ERR(ptp->clock)) { + ptp->clock = NULL; + kfree(ptp); + edev->ptp = NULL; + DP_ERR(edev, "PTP clock registeration failed\n"); + } + + return 0; +} + +void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) +{ + struct qede_ptp *ptp; + + ptp = edev->ptp; + if (!ptp) + return; + + if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) { + DP_NOTICE(edev, + "Tx timestamping was not enabled, this packet will not be timestamped\n"); + } else if (unlikely(ptp->tx_skb)) { + DP_NOTICE(edev, + "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + } else { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* schedule check for Tx timestamp */ + ptp->tx_skb = skb_get(skb); + schedule_work(&ptp->work); + } +} + +void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb) +{ + struct qede_ptp *ptp; + u64 timestamp, ns; + int rc; + + ptp = edev->ptp; + if (!ptp) + return; + + spin_lock_bh(&ptp->lock); + rc = ptp->ops->read_rx_ts(edev->cdev, ×tamp); + if (rc) { + spin_unlock_bh(&ptp->lock); + DP_INFO(edev, "Invalid Rx timestamp\n"); + return; + } + + ns = timecounter_cyc2time(&ptp->tc, timestamp); + spin_unlock_bh(&ptp->lock); + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h new file mode 100644 index 000000000000..f328f9bba53a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -0,0 +1,65 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QEDE_PTP_H_ +#define _QEDE_PTP_H_ + +#include <linux/ptp_clock_kernel.h> +#include <linux/net_tstamp.h> +#include <linux/timecounter.h> +#include "qede.h" + +void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); +void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); +int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); +void qede_ptp_start(struct qede_dev *edev, bool init_tc); +void qede_ptp_stop(struct qede_dev *edev); +void qede_ptp_remove(struct qede_dev *edev); +int qede_ptp_register_phc(struct qede_dev *edev); +int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); + +static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, + union eth_rx_cqe *cqe, + struct sk_buff *skb) +{ + /* Check if this packet was timestamped */ + if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) & + (1 << PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT))) { + if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) + & (1 << PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT))) { + qede_ptp_rx_ts(edev, skb); + } else { + DP_INFO(edev, + "Timestamp recorded for non PTP packets\n"); + } + } +} +#endif /* _QEDE_PTP_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c index 49272716a7c4..f00657ce7c8f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_roce.c +++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c @@ -1,5 +1,5 @@ /* QLogic qedr NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 5c100ab86c00..2991179c2fd0 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1707,23 +1707,30 @@ static int ql_get_full_dup(struct ql3_adapter *qdev) return status; } -static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int ql_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ql3_adapter *qdev = netdev_priv(ndev); + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = ql_supported_modes(qdev); + supported = ql_supported_modes(qdev); if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; } else { - ecmd->port = PORT_TP; - ecmd->phy_address = qdev->PHYAddr; + cmd->base.port = PORT_TP; + cmd->base.phy_address = qdev->PHYAddr; } - ecmd->advertising = ql_supported_modes(qdev); - ecmd->autoneg = ql_get_auto_cfg_status(qdev); - ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); - ecmd->duplex = ql_get_full_dup(qdev); + advertising = ql_supported_modes(qdev); + cmd->base.autoneg = ql_get_auto_cfg_status(qdev); + cmd->base.speed = ql_get_speed(qdev); + cmd->base.duplex = ql_get_full_dup(qdev); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -1769,12 +1776,12 @@ static void ql_get_pauseparam(struct net_device *ndev, } static const struct ethtool_ops ql3xxx_ethtool_ops = { - .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, .get_pauseparam = ql_get_pauseparam, + .get_link_ksettings = ql_get_link_ksettings, }; static int ql_populate_free_queue(struct ql3_adapter *qdev) @@ -2025,7 +2032,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, qdev->ndev); - netif_receive_skb(skb); + napi_gro_receive(&qdev->napi, skb); lrg_buf_cb2->skb = NULL; if (qdev->device_id == QL3022_DEVICE_ID) @@ -2095,7 +2102,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, } skb2->protocol = eth_type_trans(skb2, qdev->ndev); - netif_receive_skb(skb2); + napi_gro_receive(&qdev->napi, skb2); ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; lrg_buf_cb2->skb = NULL; @@ -2105,8 +2112,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } -static int ql_tx_rx_clean(struct ql3_adapter *qdev, - int *tx_cleaned, int *rx_cleaned, int work_to_do) +static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) { struct net_rsp_iocb *net_rsp; struct net_device *ndev = qdev->ndev; @@ -2114,7 +2120,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, /* While there are entries in the completion queue. 
*/ while ((le32_to_cpu(*(qdev->prsp_producer_index)) != - qdev->rsp_consumer_index) && (work_done < work_to_do)) { + qdev->rsp_consumer_index) && (work_done < budget)) { net_rsp = qdev->rsp_current; rmb(); @@ -2130,21 +2136,20 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, case OPCODE_OB_MAC_IOCB_FN2: ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) net_rsp); - (*tx_cleaned)++; break; case OPCODE_IB_MAC_IOCB: case OPCODE_IB_3032_MAC_IOCB: ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) net_rsp); - (*rx_cleaned)++; + work_done++; break; case OPCODE_IB_IP_IOCB: case OPCODE_IB_3032_IP_IOCB: ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) net_rsp); - (*rx_cleaned)++; + work_done++; break; default: { u32 *tmp = (u32 *)net_rsp; @@ -2169,7 +2174,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, qdev->rsp_current++; } - work_done = *tx_cleaned + *rx_cleaned; } return work_done; @@ -2178,25 +2182,25 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, static int ql_poll(struct napi_struct *napi, int budget) { struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); - int rx_cleaned = 0, tx_cleaned = 0; - unsigned long hw_flags; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + int work_done; - ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); + work_done = ql_tx_rx_clean(qdev, budget); - if (tx_cleaned + rx_cleaned != budget) { - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - __napi_complete(napi); + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&qdev->hw_lock, flags); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, &port_regs->CommonRegs.rspQConsumerIndex); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); ql_enable_interrupts(qdev); } - return tx_cleaned + rx_cleaned; + return work_done; } static irqreturn_t ql3xxx_isr(int irq, void *dev_id) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index bdbcd2b088a0..718bf58a7da6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -178,7 +178,7 @@ const u32 qlcnic_83xx_reg_tbl[] = { 0x3540, /* Device state, DRV_REG1 */ 0x3544, /* Driver state, DRV_REG2 */ 0x3548, /* Driver scratch, DRV_REG3 */ - 0x354C, /* Device partiton info, DRV_REG4 */ + 0x354C, /* Device partition info, DRV_REG4 */ 0x3524, /* Driver IDC ver, DRV_REG5 */ 0x3550, /* FW_VER_MAJOR */ 0x3554, /* FW_VER_MINOR */ @@ -3252,12 +3252,13 @@ out: return config; } -int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) +int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter, + struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = 0; int status = 0; + u32 supported, advertising; if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { /* Get port configuration info */ @@ -3271,45 +3272,48 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; if (netif_running(adapter->netdev) && ahw->has_link_events) { - ethtool_cmd_speed_set(ecmd, ahw->link_speed); - ecmd->duplex = ahw->link_duplex; - ecmd->autoneg = ahw->link_autoneg; + ecmd->base.speed = ahw->link_speed; + ecmd->base.duplex = ahw->link_duplex; + ecmd->base.autoneg = ahw->link_autoneg; } else 
{ - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - ecmd->autoneg = AUTONEG_DISABLE; + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; + ecmd->base.autoneg = AUTONEG_DISABLE; } - ecmd->supported = (SUPPORTED_10baseT_Full | + supported = (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_Autoneg); - if (ecmd->autoneg == AUTONEG_ENABLE) { + ethtool_convert_link_mode_to_legacy_u32(&advertising, + ecmd->link_modes.advertising); + + if (ecmd->base.autoneg == AUTONEG_ENABLE) { if (ahw->port_config & QLC_83XX_10_CAPABLE) - ecmd->advertising |= SUPPORTED_10baseT_Full; + advertising |= SUPPORTED_10baseT_Full; if (ahw->port_config & QLC_83XX_100_CAPABLE) - ecmd->advertising |= SUPPORTED_100baseT_Full; + advertising |= SUPPORTED_100baseT_Full; if (ahw->port_config & QLC_83XX_1G_CAPABLE) - ecmd->advertising |= SUPPORTED_1000baseT_Full; + advertising |= SUPPORTED_1000baseT_Full; if (ahw->port_config & QLC_83XX_10G_CAPABLE) - ecmd->advertising |= SUPPORTED_10000baseT_Full; + advertising |= SUPPORTED_10000baseT_Full; if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE) - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; } else { switch (ahw->link_speed) { case SPEED_10: - ecmd->advertising = SUPPORTED_10baseT_Full; + advertising = SUPPORTED_10baseT_Full; break; case SPEED_100: - ecmd->advertising = SUPPORTED_100baseT_Full; + advertising = SUPPORTED_100baseT_Full; break; case SPEED_1000: - ecmd->advertising = SUPPORTED_1000baseT_Full; + advertising = SUPPORTED_1000baseT_Full; break; case SPEED_10000: - ecmd->advertising = SUPPORTED_10000baseT_Full; + advertising = SUPPORTED_10000baseT_Full; break; default: break; @@ -3319,56 +3323,58 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, switch (ahw->supported_type) { case PORT_FIBRE: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_FIBRE; break; case PORT_TP: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + ecmd->base.port = PORT_TP; break; case PORT_DA: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_DA; - ecmd->transceiver = XCVR_EXTERNAL; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_DA; break; default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_OTHER; - ecmd->transceiver = XCVR_EXTERNAL; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_OTHER; break; } - ecmd->phy_address = ahw->physical_port; + ecmd->base.phy_address = ahw->physical_port; + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); + return status; } -int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) +int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter, + const struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = adapter->ahw->port_config; int status = 0; /* 83xx devices do not support Half duplex */ 
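/* Illustrative sketch (editor's note, not part of the patch above): the
 * qlogic conversions from .get_settings to .get_link_ksettings all follow
 * the same shape -- the driver still builds legacy u32 SUPPORTED_xxx /
 * ADVERTISED_xxx masks, fills cmd->base directly, and converts the masks
 * into link-mode bitmaps at the end.  A minimal, hypothetical example:
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_get_link_ksettings(struct net_device *ndev,
                                      struct ethtool_link_ksettings *cmd)
{
        u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_TP |
                        SUPPORTED_Autoneg;
        u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_TP |
                          ADVERTISED_Autoneg;

        cmd->base.speed = SPEED_1000;   /* replaces ethtool_cmd_speed_set() */
        cmd->base.duplex = DUPLEX_FULL;
        cmd->base.port = PORT_TP;
        cmd->base.autoneg = AUTONEG_ENABLE;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);

        return 0;
}

/* Wired up via .get_link_ksettings in struct ethtool_ops, while the old
 * .get_settings / .set_settings entries are removed, as in the hunks above.
 */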
- if (ecmd->duplex == DUPLEX_HALF) { - netdev_info(adapter->netdev, - "Half duplex mode not supported\n"); - return -EINVAL; + if (ecmd->base.duplex == DUPLEX_HALF) { + netdev_info(adapter->netdev, + "Half duplex mode not supported\n"); + return -EINVAL; } - if (ecmd->autoneg) { + if (ecmd->base.autoneg) { ahw->port_config |= QLC_83XX_AUTONEG_ENABLE; ahw->port_config |= (QLC_83XX_100_CAPABLE | QLC_83XX_1G_CAPABLE | QLC_83XX_10G_CAPABLE); } else { /* force speed */ ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE; - switch (ethtool_cmd_speed(ecmd)) { + switch (ecmd->base.speed) { case SPEED_10: ahw->port_config &= ~(QLC_83XX_100_CAPABLE | QLC_83XX_1G_CAPABLE | diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 331ae2c20f40..3dfe8e27b51c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -628,8 +628,10 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *); -int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); -int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); +int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter, + struct ethtool_link_ksettings *ecmd); +int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter, + const struct ethtool_link_ksettings *ecmd); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, struct ethtool_pauseparam *); int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index daf05155b732..d344e9d43832 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -573,8 +573,10 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); - if (ptr == NULL) - return -ENOMEM; + if (ptr == NULL) { + err = -ENOMEM; + goto err_out_free; + } tx_ring->hw_consumer = ptr; /* cmd desc ring */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 0a2318cad34d..9a869c15d8bf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -285,42 +285,43 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) sizeof(drvinfo->version)); } -static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) +static int qlcnic_82xx_get_link_ksettings(struct qlcnic_adapter *adapter, + struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 speed, reg; int check_sfp_module = 0, err = 0; u16 pcifn = ahw->pci_func; + u32 supported, advertising; /* read which mode */ if (adapter->ahw->port_type == QLCNIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | + advertising = (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | 
ADVERTISED_1000baseT_Full); - ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); - ecmd->duplex = adapter->ahw->link_duplex; - ecmd->autoneg = adapter->ahw->link_autoneg; + ecmd->base.speed = adapter->ahw->link_speed; + ecmd->base.duplex = adapter->ahw->link_duplex; + ecmd->base.autoneg = adapter->ahw->link_autoneg; } else if (adapter->ahw->port_type == QLCNIC_XGBE) { u32 val = 0; val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err); if (val == QLCNIC_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; + supported = SUPPORTED_1000baseT_Full; + advertising = ADVERTISED_1000baseT_Full; } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; } if (netif_running(adapter->netdev) && ahw->has_link_events) { @@ -331,73 +332,72 @@ static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; } - ethtool_cmd_speed_set(ecmd, ahw->link_speed); - ecmd->autoneg = ahw->link_autoneg; - ecmd->duplex = ahw->link_duplex; + ecmd->base.speed = ahw->link_speed; + ecmd->base.autoneg = ahw->link_autoneg; + ecmd->base.duplex = ahw->link_duplex; goto skip; } - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - ecmd->autoneg = AUTONEG_DISABLE; + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; + ecmd->base.autoneg = AUTONEG_DISABLE; } else return -EIO; skip: - ecmd->phy_address = adapter->ahw->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; + ecmd->base.phy_address = adapter->ahw->physical_port; switch (adapter->ahw->board_type) { case QLCNIC_BRDTYPE_P3P_REF_QG: case QLCNIC_BRDTYPE_P3P_4_GB: case QLCNIC_BRDTYPE_P3P_4_GB_MM: - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = adapter->ahw->link_autoneg; + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + ecmd->base.port = PORT_TP; + ecmd->base.autoneg = adapter->ahw->link_autoneg; break; case QLCNIC_BRDTYPE_P3P_IMEZ: case QLCNIC_BRDTYPE_P3P_XG_LOM: case QLCNIC_BRDTYPE_P3P_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_MII; + advertising |= ADVERTISED_MII; + ecmd->base.port = PORT_MII; + ecmd->base.autoneg = AUTONEG_DISABLE; break; case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + supported |= SUPPORTED_TP; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; case QLCNIC_BRDTYPE_P3P_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_FIBRE; + ecmd->base.autoneg = AUTONEG_DISABLE; break; case QLCNIC_BRDTYPE_P3P_10G_TP: if (adapter->ahw->port_type == QLCNIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | 
SUPPORTED_TP); - ecmd->advertising |= + ecmd->base.autoneg = AUTONEG_DISABLE; + supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; + ecmd->base.port = PORT_FIBRE; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; } else { - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= + ecmd->base.autoneg = AUTONEG_ENABLE; + supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; + ecmd->base.port = PORT_TP; } break; default: @@ -412,47 +412,52 @@ skip: case LINKEVENT_MODULE_OPTICAL_SRLR: case LINKEVENT_MODULE_OPTICAL_LRM: case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; + ecmd->base.port = PORT_FIBRE; break; case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; + ecmd->base.port = PORT_TP; break; default: - ecmd->port = PORT_OTHER; + ecmd->base.port = PORT_OTHER; } } + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); + return 0; } -static int qlcnic_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int qlcnic_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); if (qlcnic_82xx_check(adapter)) - return qlcnic_82xx_get_settings(adapter, ecmd); + return qlcnic_82xx_get_link_ksettings(adapter, ecmd); else if (qlcnic_83xx_check(adapter)) - return qlcnic_83xx_get_settings(adapter, ecmd); + return qlcnic_83xx_get_link_ksettings(adapter, ecmd); return -EIO; } static int qlcnic_set_port_config(struct qlcnic_adapter *adapter, - struct ethtool_cmd *ecmd) + const struct ethtool_link_ksettings *ecmd) { u32 ret = 0, config = 0; /* read which mode */ - if (ecmd->duplex) + if (ecmd->base.duplex) config |= 0x1; - if (ecmd->autoneg) + if (ecmd->base.autoneg) config |= 0x2; - switch (ethtool_cmd_speed(ecmd)) { + switch (ecmd->base.speed) { case SPEED_10: config |= (0 << 8); break; @@ -475,7 +480,8 @@ static int qlcnic_set_port_config(struct qlcnic_adapter *adapter, return ret; } -static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int qlcnic_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { u32 ret = 0; struct qlcnic_adapter *adapter = netdev_priv(dev); @@ -484,16 +490,16 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EOPNOTSUPP; if (qlcnic_83xx_check(adapter)) - ret = qlcnic_83xx_set_settings(adapter, ecmd); + ret = qlcnic_83xx_set_link_ksettings(adapter, ecmd); else ret = qlcnic_set_port_config(adapter, ecmd); if (!ret) return ret; - adapter->ahw->link_speed = ethtool_cmd_speed(ecmd); - adapter->ahw->link_duplex = ecmd->duplex; - adapter->ahw->link_autoneg = ecmd->autoneg; + adapter->ahw->link_speed = ecmd->base.speed; + adapter->ahw->link_duplex = ecmd->base.duplex; + adapter->ahw->link_autoneg = ecmd->base.autoneg; if (!netif_running(dev)) return 0; @@ -1822,8 +1828,6 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) } const struct ethtool_ops qlcnic_ethtool_ops = { - .get_settings = qlcnic_get_settings, - .set_settings = qlcnic_set_settings, .get_drvinfo = qlcnic_get_drvinfo, .get_regs_len = 
qlcnic_get_regs_len, .get_regs = qlcnic_get_regs, @@ -1850,10 +1854,11 @@ const struct ethtool_ops qlcnic_ethtool_ops = { .get_dump_flag = qlcnic_get_dump_flag, .get_dump_data = qlcnic_get_dump_data, .set_dump = qlcnic_set_dump, + .get_link_ksettings = qlcnic_get_link_ksettings, + .set_link_ksettings = qlcnic_set_link_ksettings, }; const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { - .get_settings = qlcnic_get_settings, .get_drvinfo = qlcnic_get_drvinfo, .get_regs_len = qlcnic_get_regs_len, .get_regs = qlcnic_get_regs, @@ -1872,12 +1877,13 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { .set_coalesce = qlcnic_set_intr_coalesce, .set_msglevel = qlcnic_set_msglevel, .get_msglevel = qlcnic_get_msglevel, + .get_link_ksettings = qlcnic_get_link_ksettings, }; const struct ethtool_ops qlcnic_ethtool_failed_ops = { - .get_settings = qlcnic_get_settings, .get_drvinfo = qlcnic_get_drvinfo, .set_msglevel = qlcnic_set_msglevel, .get_msglevel = qlcnic_get_msglevel, .set_dump = qlcnic_set_dump, + .get_link_ksettings = qlcnic_get_link_ksettings, }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index fedd7366713c..84dd83031a1b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_sds_intr(adapter, sds_ring); qlcnic_enable_tx_intr(adapter, tx_ring); @@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget) work_done = qlcnic_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget) adapter = sds_ring->adapter; work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 4c0cce962585..b6628aaa6e4a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -4220,7 +4220,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } @@ -4256,7 +4256,7 @@ recheck: if (dev == NULL) goto done; - if (dev->priv_flags & IFF_802_1Q_VLAN) { + if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } diff 
--git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index ccbb04503b27..73027a6c06c7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1192,56 +1192,56 @@ static struct device_attribute dev_attr_beacon = { .store = qlcnic_store_beacon, }; -static struct bin_attribute bin_attr_crb = { +static const struct bin_attribute bin_attr_crb = { .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_read_crb, .write = qlcnic_sysfs_write_crb, }; -static struct bin_attribute bin_attr_mem = { +static const struct bin_attribute bin_attr_mem = { .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_read_mem, .write = qlcnic_sysfs_write_mem, }; -static struct bin_attribute bin_attr_npar_config = { +static const struct bin_attribute bin_attr_npar_config = { .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_read_npar_config, .write = qlcnic_sysfs_write_npar_config, }; -static struct bin_attribute bin_attr_pci_config = { +static const struct bin_attribute bin_attr_pci_config = { .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_read_pci_config, .write = NULL, }; -static struct bin_attribute bin_attr_port_stats = { +static const struct bin_attribute bin_attr_port_stats = { .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_get_port_stats, .write = qlcnic_sysfs_clear_port_stats, }; -static struct bin_attribute bin_attr_esw_stats = { +static const struct bin_attribute bin_attr_esw_stats = { .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_get_esw_stats, .write = qlcnic_sysfs_clear_esw_stats, }; -static struct bin_attribute bin_attr_esw_config = { +static const struct bin_attribute bin_attr_esw_config = { .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_read_esw_config, .write = qlcnic_sysfs_write_esw_config, }; -static struct bin_attribute bin_attr_pm_config = { +static const struct bin_attribute bin_attr_pm_config = { .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_sysfs_read_pm_config, diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 5dade1fd08b8..31f40148fa5c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -375,28 +375,34 @@ ql_get_ethtool_stats(struct net_device *ndev, } } -static int ql_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) +static int ql_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *ecmd) { struct ql_adapter *qdev = netdev_priv(ndev); + u32 supported, advertising; + + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - ecmd->transceiver = XCVR_EXTERNAL; if ((qdev->link_status & STS_LINK_TYPE_MASK) == STS_LINK_TYPE_10GBASET) { - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - ecmd->autoneg = AUTONEG_ENABLE; + supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->base.port = PORT_TP; + 
ecmd->base.autoneg = AUTONEG_ENABLE; } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + ecmd->base.port = PORT_FIBRE; } - ethtool_cmd_speed_set(ecmd, SPEED_10000); - ecmd->duplex = DUPLEX_FULL; + ecmd->base.speed = SPEED_10000; + ecmd->base.duplex = DUPLEX_FULL; + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); return 0; } @@ -706,7 +712,6 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value) } const struct ethtool_ops qlge_ethtool_ops = { - .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, .get_wol = ql_get_wol, .set_wol = ql_set_wol, @@ -724,5 +729,6 @@ const struct ethtool_ops qlge_ethtool_ops = { .get_sset_count = ql_get_sset_count, .get_strings = ql_get_strings, .get_ethtool_stats = ql_get_ethtool_stats, + .get_link_ksettings = ql_get_link_ksettings, }; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 1409412ab39d..e9e647072596 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); ql_enable_completion_interrupt(qdev, rx_ring->irq); } return work_done; diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile index 7a6687982dae..fc57cedf4c0c 100644 --- a/drivers/net/ethernet/qualcomm/emac/Makefile +++ b/drivers/net/ethernet/qualcomm/emac/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o -qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o \ +qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \ emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \ emac-sgmii-qdf2400.o diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c new file mode 100644 index 000000000000..bbe24639aa5a --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -0,0 +1,261 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/ethtool.h> +#include <linux/phy.h> + +#include "emac.h" + +static const char * const emac_ethtool_stat_strings[] = { + "rx_ok", + "rx_bcast", + "rx_mcast", + "rx_pause", + "rx_ctrl", + "rx_fcs_err", + "rx_len_err", + "rx_byte_cnt", + "rx_runt", + "rx_frag", + "rx_sz_64", + "rx_sz_65_127", + "rx_sz_128_255", + "rx_sz_256_511", + "rx_sz_512_1023", + "rx_sz_1024_1518", + "rx_sz_1519_max", + "rx_sz_ov", + "rx_rxf_ov", + "rx_align_err", + "rx_bcast_byte_cnt", + "rx_mcast_byte_cnt", + "rx_err_addr", + "rx_crc_align", + "rx_jabbers", + "tx_ok", + "tx_bcast", + "tx_mcast", + "tx_pause", + "tx_exc_defer", + "tx_ctrl", + "tx_defer", + "tx_byte_cnt", + "tx_sz_64", + "tx_sz_65_127", + "tx_sz_128_255", + "tx_sz_256_511", + "tx_sz_512_1023", + "tx_sz_1024_1518", + "tx_sz_1519_max", + "tx_1_col", + "tx_2_col", + "tx_late_col", + "tx_abort_col", + "tx_underrun", + "tx_rd_eop", + "tx_len_err", + "tx_trunc", + "tx_bcast_byte", + "tx_mcast_byte", + "tx_col", +}; + +#define EMAC_STATS_LEN ARRAY_SIZE(emac_ethtool_stat_strings) + +static u32 emac_get_msglevel(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + return adpt->msg_enable; +} + +static void emac_set_msglevel(struct net_device *netdev, u32 data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->msg_enable = data; +} + +static int emac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return EMAC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < EMAC_STATS_LEN; i++) { + strlcpy(data, emac_ethtool_stat_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static void emac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + spin_lock(&adpt->stats.lock); + + emac_update_hw_stats(adpt); + memcpy(data, &adpt->stats, EMAC_STATS_LEN * sizeof(u64)); + + spin_unlock(&adpt->stats.lock); +} + +static int emac_nway_reset(struct net_device *netdev) +{ + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + +static void emac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + ring->rx_max_pending = EMAC_MAX_RX_DESCS; + ring->tx_max_pending = EMAC_MAX_TX_DESCS; + ring->rx_pending = adpt->rx_desc_cnt; + ring->tx_pending = adpt->tx_desc_cnt; +} + +static int emac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + /* We don't have separate queues/rings for small/large frames, so + * reject any attempt to specify those values separately. + */ + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + adpt->tx_desc_cnt = + clamp_val(ring->tx_pending, EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS); + + adpt->rx_desc_cnt = + clamp_val(ring->rx_pending, EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS); + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; +} + +static void emac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + pause->autoneg = adpt->automatic ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + pause->rx_pause = adpt->rx_flow_control ? 1 : 0; + pause->tx_pause = adpt->tx_flow_control ? 1 : 0; +} + +static int emac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->automatic = pause->autoneg == AUTONEG_ENABLE; + adpt->rx_flow_control = pause->rx_pause != 0; + adpt->tx_flow_control = pause->tx_pause != 0; + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; +} + +/* Selected registers that might want to track during runtime. */ +static const u16 emac_regs[] = { + EMAC_DMA_MAS_CTRL, + EMAC_MAC_CTRL, + EMAC_TXQ_CTRL_0, + EMAC_RXQ_CTRL_0, + EMAC_DMA_CTRL, + EMAC_INT_MASK, + EMAC_AXI_MAST_CTRL, + EMAC_CORE_HW_VERSION, + EMAC_MISC_CTRL, +}; + +/* Every time emac_regs[] above is changed, increase this version number. */ +#define EMAC_REGS_VERSION 0 + +#define EMAC_MAX_REG_SIZE ARRAY_SIZE(emac_regs) + +static void emac_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *buff) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + u32 *val = buff; + unsigned int i; + + regs->version = EMAC_REGS_VERSION; + regs->len = EMAC_MAX_REG_SIZE * sizeof(u32); + + for (i = 0; i < EMAC_MAX_REG_SIZE; i++) + val[i] = readl(adpt->base + emac_regs[i]); +} + +static int emac_get_regs_len(struct net_device *netdev) +{ + return EMAC_MAX_REG_SIZE * sizeof(u32); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + + .get_msglevel = emac_get_msglevel, + .set_msglevel = emac_set_msglevel, + + .get_sset_count = emac_get_sset_count, + .get_strings = emac_get_strings, + .get_ethtool_stats = emac_get_ethtool_stats, + + .get_ringparam = emac_get_ringparam, + .set_ringparam = emac_set_ringparam, + + .get_pauseparam = emac_get_pauseparam, + .set_pauseparam = emac_set_pauseparam, + + .nway_reset = emac_nway_reset, + + .get_link = ethtool_op_get_link, + + .get_regs_len = emac_get_regs_len, + .get_regs = emac_get_regs, +}; + +void emac_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &emac_ethtool_ops; +} diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 0b4deb31e742..cc065ffbe4b5 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -25,58 +25,6 @@ #include "emac.h" #include "emac-sgmii.h" -/* EMAC base register offsets */ -#define EMAC_MAC_CTRL 0x001480 -#define EMAC_WOL_CTRL0 0x0014a0 -#define EMAC_RSS_KEY0 0x0014b0 -#define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0 -#define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4 -#define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8 -#define EMAC_INTER_SRAM_PART9 0x001534 -#define EMAC_DESC_CTRL_0 0x001540 -#define EMAC_DESC_CTRL_1 0x001544 -#define EMAC_DESC_CTRL_2 0x001550 -#define EMAC_DESC_CTRL_10 0x001554 -#define EMAC_DESC_CTRL_12 0x001558 -#define EMAC_DESC_CTRL_13 0x00155c -#define EMAC_DESC_CTRL_3 0x001560 -#define EMAC_DESC_CTRL_4 0x001564 -#define EMAC_DESC_CTRL_5 0x001568 -#define EMAC_DESC_CTRL_14 0x00156c -#define EMAC_DESC_CTRL_15 0x001570 -#define EMAC_DESC_CTRL_16 0x001574 -#define EMAC_DESC_CTRL_6 0x001578 -#define EMAC_DESC_CTRL_8 0x001580 -#define EMAC_DESC_CTRL_9 0x001584 -#define EMAC_DESC_CTRL_11 0x001588 -#define EMAC_TXQ_CTRL_0 0x001590 -#define EMAC_TXQ_CTRL_1 0x001594 -#define EMAC_TXQ_CTRL_2 0x001598 -#define EMAC_RXQ_CTRL_0 0x0015a0 -#define 
EMAC_RXQ_CTRL_1 0x0015a4 -#define EMAC_RXQ_CTRL_2 0x0015a8 -#define EMAC_RXQ_CTRL_3 0x0015ac -#define EMAC_BASE_CPU_NUMBER 0x0015b8 -#define EMAC_DMA_CTRL 0x0015c0 -#define EMAC_MAILBOX_0 0x0015e0 -#define EMAC_MAILBOX_5 0x0015e4 -#define EMAC_MAILBOX_6 0x0015e8 -#define EMAC_MAILBOX_13 0x0015ec -#define EMAC_MAILBOX_2 0x0015f4 -#define EMAC_MAILBOX_3 0x0015f8 -#define EMAC_MAILBOX_11 0x00160c -#define EMAC_AXI_MAST_CTRL 0x001610 -#define EMAC_MAILBOX_12 0x001614 -#define EMAC_MAILBOX_9 0x001618 -#define EMAC_MAILBOX_10 0x00161c -#define EMAC_ATHR_HEADER_CTRL 0x001620 -#define EMAC_CLK_GATE_CTRL 0x001814 -#define EMAC_MISC_CTRL 0x001990 -#define EMAC_MAILBOX_7 0x0019e0 -#define EMAC_MAILBOX_8 0x0019e4 -#define EMAC_MAILBOX_15 0x001bd4 -#define EMAC_MAILBOX_16 0x001bd8 - /* EMAC_MAC_CTRL */ #define SINGLE_PAUSE_MODE 0x10000000 #define DEBUG_MODE 0x08000000 @@ -103,14 +51,6 @@ #define RXEN 0x00000002 #define TXEN 0x00000001 - -/* EMAC_WOL_CTRL0 */ -#define LK_CHG_PME 0x20 -#define LK_CHG_EN 0x10 -#define MG_FRAME_PME 0x8 -#define MG_FRAME_EN 0x4 -#define WK_FRAME_EN 0x1 - /* EMAC_DESC_CTRL_3 */ #define RFD_RING_SIZE_BMSK 0xfff @@ -314,8 +254,6 @@ struct emac_skb_cb { RX_PKT_INT2 |\ RX_PKT_INT3) -#define EMAC_MAC_IRQ_RES "core0" - void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr) { u32 crc32, bit, reg, mta; @@ -558,7 +496,7 @@ void emac_mac_reset(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN); } -void emac_mac_start(struct emac_adapter *adpt) +static void emac_mac_start(struct emac_adapter *adpt) { struct phy_device *phydev = adpt->phydev; u32 mac, csr1; @@ -575,11 +513,19 @@ void emac_mac_start(struct emac_adapter *adpt) mac |= TXEN | RXEN; /* enable RX/TX */ - /* Configure MAC flow control to match the PHY's settings. */ - if (phydev->pause) - mac |= RXFC; - if (phydev->pause != phydev->asym_pause) - mac |= TXFC; + /* Configure MAC flow control. If set to automatic, then match + * whatever the PHY does. Otherwise, enable or disable it, depending + * on what the user configured via ethtool. + */ + mac &= ~(RXFC | TXFC); + + if (adpt->automatic) { + /* If it's set to automatic, then update our local values */ + adpt->rx_flow_control = phydev->pause; + adpt->tx_flow_control = phydev->pause != phydev->asym_pause; + } + mac |= adpt->rx_flow_control ? RXFC : 0; + mac |= adpt->tx_flow_control ? 
TXFC : 0; /* setup link speed */ mac &= ~SPEED_MASK; @@ -621,8 +567,6 @@ void emac_mac_start(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL, (HEADER_ENABLE | HEADER_CNT_EN), 0); - - emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN); } void emac_mac_stop(struct emac_adapter *adpt) @@ -963,12 +907,16 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, static void emac_adjust_link(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_sgmii *sgmii = &adpt->phy; struct phy_device *phydev = netdev->phydev; - if (phydev->link) + if (phydev->link) { emac_mac_start(adpt); - else + sgmii->link_up(adpt); + } else { + sgmii->link_down(adpt); emac_mac_stop(adpt); + } phy_print_status(phydev); } @@ -977,40 +925,26 @@ static void emac_adjust_link(struct net_device *netdev) int emac_mac_up(struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - struct emac_irq *irq = &adpt->irq; int ret; emac_mac_rx_tx_ring_reset_all(adpt); emac_mac_config(adpt); - - ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq); - if (ret) { - netdev_err(adpt->netdev, "could not request %s irq\n", - EMAC_MAC_IRQ_RES); - return ret; - } - emac_mac_rx_descs_refill(adpt, &adpt->rx_q); + adpt->phydev->irq = PHY_IGNORE_INTERRUPT; ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, PHY_INTERFACE_MODE_SGMII); if (ret) { netdev_err(adpt->netdev, "could not connect phy\n"); - free_irq(irq->irq, irq); return ret; } + phy_attached_print(adpt->phydev, NULL); + /* enable mac irq */ writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); - /* Enable pause frames. Without this feature, the EMAC has been shown - * to receive (and drop) frames with FCS errors at gigabit connections. 
- */ - adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - adpt->phydev->irq = PHY_IGNORE_INTERRUPT; phy_start(adpt->phydev); napi_enable(&adpt->rx_q.napi); @@ -1036,7 +970,6 @@ void emac_mac_down(struct emac_adapter *adpt) writel(DIS_INT, adpt->base + EMAC_INT_STATUS); writel(0, adpt->base + EMAC_INT_MASK); synchronize_irq(adpt->irq.irq); - free_irq(adpt->irq.irq, &adpt->irq); phy_disconnect(adpt->phydev); @@ -1213,7 +1146,6 @@ void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd), (bool)RRD_CVTAG(&rrd)); - netdev->last_rx = jiffies; (*num_pkts)++; } while (*num_pkts < max_pkts); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h index f3aa24dc4a29..5028fb4bec2b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h @@ -230,7 +230,6 @@ struct emac_adapter; int emac_mac_up(struct emac_adapter *adpt); void emac_mac_down(struct emac_adapter *adpt); void emac_mac_reset(struct emac_adapter *adpt); -void emac_mac_start(struct emac_adapter *adpt); void emac_mac_stop(struct emac_adapter *adpt); void emac_mac_mode_config(struct emac_adapter *adpt); void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 2851b4c56570..441c19366489 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -22,8 +22,6 @@ #include <linux/acpi.h> #include "emac.h" #include "emac-mac.h" -#include "emac-phy.h" -#include "emac-sgmii.h" /* EMAC base register offsets */ #define EMAC_MDIO_CTRL 0x001414 @@ -228,8 +226,5 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) return -ENODEV; } - if (adpt->phydev->drv) - phy_attached_print(adpt->phydev, NULL); - return 0; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h index 49f3701a6dd7..c0c301c72129 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h @@ -13,19 +13,6 @@ #ifndef _EMAC_PHY_H_ #define _EMAC_PHY_H_ -typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); - -/** emac_phy - internal emac phy - * @base base address - * @digital per-lane digital block - * @initialize initialization function - */ -struct emac_phy { - void __iomem *base; - void __iomem *digital; - emac_sgmii_initialize initialize; -}; - struct emac_adapter; int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c index af690e1a6e7b..10de8d0d9a56 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c @@ -214,7 +214,7 @@ static const struct emac_reg_write tx_rx_setting[] = { int emac_sgmii_init_fsm9900(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; unsigned int i; emac_reg_write_all(phy->base, physical_coding_sublayer_programming, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c index 5b8419498ef1..f62c215be779 100644 --- 
a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c @@ -174,7 +174,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2400(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c index 6170200d7479..b9c0df7bdd15 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c @@ -167,7 +167,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2432(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index bf722a9bb09d..040b28977ee7 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -25,7 +25,9 @@ #define EMAC_SGMII_PHY_SPEED_CFG1 0x0074 #define EMAC_SGMII_PHY_IRQ_CMD 0x00ac #define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x00b0 +#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4 #define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x00b8 +#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x00d4 #define FORCE_AN_TX_CFG BIT(5) #define FORCE_AN_RX_CFG BIT(4) @@ -36,6 +38,8 @@ #define SPDMODE_100 BIT(0) #define SPDMODE_10 0 +#define CDR_ALIGN_DET BIT(6) + #define IRQ_GLOBAL_CLEAR BIT(0) #define DECODE_CODE_ERR BIT(7) @@ -44,52 +48,28 @@ #define SGMII_PHY_IRQ_CLR_WAIT_TIME 10 #define SGMII_PHY_INTERRUPT_ERR (DECODE_CODE_ERR | DECODE_DISP_ERR) +#define SGMII_ISR_MASK (SGMII_PHY_INTERRUPT_ERR) #define SERDES_START_WAIT_TIMES 100 -static int emac_sgmii_link_init(struct emac_adapter *adpt) +/* Initialize the SGMII link between the internal and external PHYs. */ +static void emac_sgmii_link_init(struct emac_adapter *adpt) { - struct phy_device *phydev = adpt->phydev; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; + /* Always use autonegotiation. It works no matter how the external + * PHY is configured. 
+ */ val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - - if (phydev->autoneg == AUTONEG_ENABLE) { - val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); - val |= AN_ENABLE; - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } else { - u32 speed_cfg; - - switch (phydev->speed) { - case SPEED_10: - speed_cfg = SPDMODE_10; - break; - case SPEED_100: - speed_cfg = SPDMODE_100; - break; - case SPEED_1000: - speed_cfg = SPDMODE_1000; - break; - default: - return -EINVAL; - } - - if (phydev->duplex == DUPLEX_FULL) - speed_cfg |= DUPLEX_MODE; - - val &= ~AN_ENABLE; - writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1); - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } - - return 0; + val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); + val |= AN_ENABLE; + writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); } static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 status; writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); @@ -121,9 +101,54 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) return 0; } +/* The number of decode errors that triggers a reset */ +#define DECODE_ERROR_LIMIT 2 + +static irqreturn_t emac_sgmii_interrupt(int irq, void *data) +{ + struct emac_adapter *adpt = data; + struct emac_sgmii *phy = &adpt->phy; + u32 status; + + status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS); + status &= SGMII_ISR_MASK; + if (!status) + return IRQ_HANDLED; + + /* If we get a decoding error and CDR is not locked, then try + * resetting the internal PHY. The internal PHY uses an embedded + * clock with Clock and Data Recovery (CDR) to recover the + * clock and data. + */ + if (status & SGMII_PHY_INTERRUPT_ERR) { + int count; + + /* The SGMII is capable of recovering from some decode + * errors automatically. However, if we get multiple + * decode errors in a row, then assume that something + * is wrong and reset the interface. + */ + count = atomic_inc_return(&phy->decode_error_count); + if (count == DECODE_ERROR_LIMIT) { + schedule_work(&adpt->work_thread); + atomic_set(&phy->decode_error_count, 0); + } + } else { + /* We only care about consecutive decode errors. 
*/ + atomic_set(&phy->decode_error_count, 0); + } + + if (emac_sgmii_irq_clear(adpt, status)) { + netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n"); + schedule_work(&adpt->work_thread); + } + + return IRQ_HANDLED; +} + static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; /* Reset PHY */ @@ -145,12 +170,7 @@ void emac_sgmii_reset(struct emac_adapter *adpt) int ret; emac_sgmii_reset_prepare(adpt); - - ret = emac_sgmii_link_init(adpt); - if (ret) { - netdev_err(adpt->netdev, "unsupported link speed\n"); - return; - } + emac_sgmii_link_init(adpt); ret = adpt->phy.initialize(adpt); if (ret) @@ -159,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt) ret); } +static int emac_sgmii_open(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + if (sgmii->irq) { + /* Make sure interrupts are cleared and disabled first */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0, + "emac-sgmii", adpt); + if (ret) { + netdev_err(adpt->netdev, + "could not register handler for internal PHY\n"); + return ret; + } + } + + return 0; +} + +static int emac_sgmii_close(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Make sure interrupts are disabled */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + free_irq(sgmii->irq, adpt); + + return 0; +} + +/* The error interrupts are only valid after the link is up */ +static int emac_sgmii_link_up(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + /* Clear and enable interrupts */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + + writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + return 0; +} + +static int emac_sgmii_link_down(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Disable interrupts */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + synchronize_irq(sgmii->irq); + + return 0; +} + static int emac_sgmii_acpi_match(struct device *dev, void *data) { #ifdef CONFIG_ACPI @@ -169,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) {} }; const struct acpi_device_id *id = acpi_match_device(match_table, dev); - emac_sgmii_initialize *initialize = data; + emac_sgmii_function *initialize = data; if (id) { acpi_handle handle = ACPI_HANDLE(dev); @@ -217,7 +299,7 @@ static const struct of_device_id emac_sgmii_dt_match[] = { int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) { struct platform_device *sgmii_pdev = NULL; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; struct resource *res; int ret; @@ -256,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->initialize = (emac_sgmii_initialize)match->data; + phy->initialize = (emac_sgmii_function)match->data; } + phy->open = emac_sgmii_open; + phy->close = emac_sgmii_close; + phy->link_up = emac_sgmii_link_up; + phy->link_down = emac_sgmii_link_down; + /* Base address is the first address */ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0); if (!res) { @@ -286,7 +373,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) if (ret) goto error; - emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); + 
emac_sgmii_link_init(adpt); + + ret = platform_get_irq(sgmii_pdev, 0); + if (ret > 0) + phy->irq = ret; /* We've remapped the addresses, so we don't need the device any * more. of_find_device_by_node() says we should release it. diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h index 80ed3dc3157a..e7c0c3b2baa4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h @@ -16,6 +16,31 @@ struct emac_adapter; struct platform_device; +typedef int (*emac_sgmii_function)(struct emac_adapter *adpt); + +/** emac_sgmii - internal emac phy + * @base base address + * @digital per-lane digital block + * @irq the interrupt number + * @decode_error_count reference count of consecutive decode errors + * @initialize initialization function + * @open called when the driver is opened + * @close called when the driver is closed + * @link_up called when the link comes up + * @link_down called when the link comes down + */ +struct emac_sgmii { + void __iomem *base; + void __iomem *digital; + unsigned int irq; + atomic_t decode_error_count; + emac_sgmii_function initialize; + emac_sgmii_function open; + emac_sgmii_function close; + emac_sgmii_function link_up; + emac_sgmii_function link_down; +}; + int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); void emac_sgmii_reset(struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index f46d300bd585..28a8cdc36485 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget) emac_mac_rx_process(adpt, rx_q, &work_done, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); irq->mask |= rx_q->intr; writel(irq->mask, adpt->base + EMAC_INT_MASK); @@ -256,22 +256,37 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu) static int emac_open(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_irq *irq = &adpt->irq; int ret; + ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq); + if (ret) { + netdev_err(adpt->netdev, "could not request emac-core0 irq\n"); + return ret; + } + /* allocate rx/tx dma buffer & descriptors */ ret = emac_mac_rx_tx_rings_alloc_all(adpt); if (ret) { netdev_err(adpt->netdev, "error allocating rx/tx rings\n"); + free_irq(irq->irq, irq); return ret; } ret = emac_mac_up(adpt); if (ret) { emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); return ret; } - emac_mac_start(adpt); + ret = adpt->phy.open(adpt); + if (ret) { + emac_mac_down(adpt); + emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); + return ret; + } return 0; } @@ -283,9 +298,12 @@ static int emac_close(struct net_device *netdev) mutex_lock(&adpt->reset_lock); + adpt->phy.close(adpt); emac_mac_down(adpt); emac_mac_rx_tx_rings_free_all(adpt); + free_irq(adpt->irq.irq, &adpt->irq); + mutex_unlock(&adpt->reset_lock); return 0; @@ -311,45 +329,56 @@ static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return phy_mii_ioctl(netdev->phydev, ifr, cmd); } -/* Provide network statistics info for the interface */ -static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *net_stats) +/** + * emac_update_hw_stats - read the EMAC stat registers + * + * Reads the stats 
registers and write the values to adpt->stats. + * + * adpt->stats.lock must be held while calling this function, + * and while reading from adpt->stats. + */ +void emac_update_hw_stats(struct emac_adapter *adpt) { - struct emac_adapter *adpt = netdev_priv(netdev); - unsigned int addr = REG_MAC_RX_STATUS_BIN; struct emac_stats *stats = &adpt->stats; u64 *stats_itr = &adpt->stats.rx_ok; - u32 val; - - spin_lock(&stats->lock); + void __iomem *base = adpt->base; + unsigned int addr; + addr = REG_MAC_RX_STATUS_BIN; while (addr <= REG_MAC_RX_STATUS_END) { - val = readl_relaxed(adpt->base + addr); - *stats_itr += val; + *stats_itr += readl_relaxed(base + addr); stats_itr++; addr += sizeof(u32); } /* additional rx status */ - val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23); - adpt->stats.rx_crc_align += val; - val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24); - adpt->stats.rx_jabbers += val; + stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23); + stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24); /* update tx status */ addr = REG_MAC_TX_STATUS_BIN; - stats_itr = &adpt->stats.tx_ok; + stats_itr = &stats->tx_ok; while (addr <= REG_MAC_TX_STATUS_END) { - val = readl_relaxed(adpt->base + addr); - *stats_itr += val; - ++stats_itr; + *stats_itr += readl_relaxed(base + addr); + stats_itr++; addr += sizeof(u32); } /* additional tx status */ - val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25); - adpt->stats.tx_col += val; + stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25); +} + +/* Provide network statistics info for the interface */ +static void emac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_stats *stats = &adpt->stats; + + spin_lock(&stats->lock); + + emac_update_hw_stats(adpt); /* return parsed statistics */ net_stats->rx_packets = stats->rx_ok; @@ -377,8 +406,6 @@ static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, net_stats->tx_window_errors = stats->tx_late_col; spin_unlock(&stats->lock); - - return net_stats; } static const struct net_device_ops emac_netdev_ops = { @@ -409,6 +436,10 @@ static void emac_init_adapter(struct emac_adapter *adpt) { u32 reg; + adpt->rrd_size = EMAC_RRD_SIZE; + adpt->tpd_size = EMAC_TPD_SIZE; + adpt->rfd_size = EMAC_RFD_SIZE; + /* descriptors */ adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS; adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS; @@ -429,6 +460,9 @@ static void emac_init_adapter(struct emac_adapter *adpt) /* others */ adpt->preamble = EMAC_PREAMBLE_DEF; + + /* default to automatic flow control */ + adpt->automatic = true; } /* Get the clock */ @@ -593,7 +627,7 @@ static int emac_probe(struct platform_device *pdev) { struct net_device *netdev; struct emac_adapter *adpt; - struct emac_phy *phy; + struct emac_sgmii *phy; u16 devid, revid; u32 reg; int ret; @@ -620,12 +654,14 @@ static int emac_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); + emac_set_ethtool_ops(netdev); adpt = netdev_priv(netdev); adpt->netdev = netdev; adpt->msg_enable = EMAC_MSG_DEFAULT; phy = &adpt->phy; + atomic_set(&phy->decode_error_count, 0); mutex_init(&adpt->reset_lock); spin_lock_init(&adpt->stats.lock); @@ -646,10 +682,6 @@ static int emac_probe(struct platform_device *pdev) netdev->watchdog_timeo = EMAC_WATCHDOG_TIME; netdev->irq = adpt->irq.irq; - adpt->rrd_size = EMAC_RRD_SIZE; - adpt->tpd_size = EMAC_TPD_SIZE; - adpt->rfd_size = EMAC_RFD_SIZE; 
- netdev->netdev_ops = &emac_netdev_ops; emac_init_adapter(adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 0c76e6cb8c9e..8ee4ec6aef2e 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -19,37 +19,88 @@ #include <linux/platform_device.h> #include "emac-mac.h" #include "emac-phy.h" +#include "emac-sgmii.h" /* EMAC base register offsets */ -#define EMAC_DMA_MAS_CTRL 0x001400 -#define EMAC_IRQ_MOD_TIM_INIT 0x001408 -#define EMAC_BLK_IDLE_STS 0x00140c -#define EMAC_PHY_LINK_DELAY 0x00141c -#define EMAC_SYS_ALIV_CTRL 0x001434 -#define EMAC_MAC_IPGIFG_CTRL 0x001484 -#define EMAC_MAC_STA_ADDR0 0x001488 -#define EMAC_MAC_STA_ADDR1 0x00148c -#define EMAC_HASH_TAB_REG0 0x001490 -#define EMAC_HASH_TAB_REG1 0x001494 -#define EMAC_MAC_HALF_DPLX_CTRL 0x001498 -#define EMAC_MAX_FRAM_LEN_CTRL 0x00149c -#define EMAC_INT_STATUS 0x001600 -#define EMAC_INT_MASK 0x001604 -#define EMAC_RXMAC_STATC_REG0 0x001700 -#define EMAC_RXMAC_STATC_REG22 0x001758 -#define EMAC_TXMAC_STATC_REG0 0x001760 -#define EMAC_TXMAC_STATC_REG24 0x0017c0 -#define EMAC_CORE_HW_VERSION 0x001974 -#define EMAC_IDT_TABLE0 0x001b00 -#define EMAC_RXMAC_STATC_REG23 0x001bc8 -#define EMAC_RXMAC_STATC_REG24 0x001bcc -#define EMAC_TXMAC_STATC_REG25 0x001bd0 -#define EMAC_INT1_MASK 0x001bf0 -#define EMAC_INT1_STATUS 0x001bf4 -#define EMAC_INT2_MASK 0x001bf8 -#define EMAC_INT2_STATUS 0x001bfc -#define EMAC_INT3_MASK 0x001c00 -#define EMAC_INT3_STATUS 0x001c04 +#define EMAC_DMA_MAS_CTRL 0x1400 +#define EMAC_IRQ_MOD_TIM_INIT 0x1408 +#define EMAC_BLK_IDLE_STS 0x140c +#define EMAC_PHY_LINK_DELAY 0x141c +#define EMAC_SYS_ALIV_CTRL 0x1434 +#define EMAC_MAC_CTRL 0x1480 +#define EMAC_MAC_IPGIFG_CTRL 0x1484 +#define EMAC_MAC_STA_ADDR0 0x1488 +#define EMAC_MAC_STA_ADDR1 0x148c +#define EMAC_HASH_TAB_REG0 0x1490 +#define EMAC_HASH_TAB_REG1 0x1494 +#define EMAC_MAC_HALF_DPLX_CTRL 0x1498 +#define EMAC_MAX_FRAM_LEN_CTRL 0x149c +#define EMAC_WOL_CTRL0 0x14a0 +#define EMAC_RSS_KEY0 0x14b0 +#define EMAC_H1TPD_BASE_ADDR_LO 0x14e0 +#define EMAC_H2TPD_BASE_ADDR_LO 0x14e4 +#define EMAC_H3TPD_BASE_ADDR_LO 0x14e8 +#define EMAC_INTER_SRAM_PART9 0x1534 +#define EMAC_DESC_CTRL_0 0x1540 +#define EMAC_DESC_CTRL_1 0x1544 +#define EMAC_DESC_CTRL_2 0x1550 +#define EMAC_DESC_CTRL_10 0x1554 +#define EMAC_DESC_CTRL_12 0x1558 +#define EMAC_DESC_CTRL_13 0x155c +#define EMAC_DESC_CTRL_3 0x1560 +#define EMAC_DESC_CTRL_4 0x1564 +#define EMAC_DESC_CTRL_5 0x1568 +#define EMAC_DESC_CTRL_14 0x156c +#define EMAC_DESC_CTRL_15 0x1570 +#define EMAC_DESC_CTRL_16 0x1574 +#define EMAC_DESC_CTRL_6 0x1578 +#define EMAC_DESC_CTRL_8 0x1580 +#define EMAC_DESC_CTRL_9 0x1584 +#define EMAC_DESC_CTRL_11 0x1588 +#define EMAC_TXQ_CTRL_0 0x1590 +#define EMAC_TXQ_CTRL_1 0x1594 +#define EMAC_TXQ_CTRL_2 0x1598 +#define EMAC_RXQ_CTRL_0 0x15a0 +#define EMAC_RXQ_CTRL_1 0x15a4 +#define EMAC_RXQ_CTRL_2 0x15a8 +#define EMAC_RXQ_CTRL_3 0x15ac +#define EMAC_BASE_CPU_NUMBER 0x15b8 +#define EMAC_DMA_CTRL 0x15c0 +#define EMAC_MAILBOX_0 0x15e0 +#define EMAC_MAILBOX_5 0x15e4 +#define EMAC_MAILBOX_6 0x15e8 +#define EMAC_MAILBOX_13 0x15ec +#define EMAC_MAILBOX_2 0x15f4 +#define EMAC_MAILBOX_3 0x15f8 +#define EMAC_INT_STATUS 0x1600 +#define EMAC_INT_MASK 0x1604 +#define EMAC_MAILBOX_11 0x160c +#define EMAC_AXI_MAST_CTRL 0x1610 +#define EMAC_MAILBOX_12 0x1614 +#define EMAC_MAILBOX_9 0x1618 +#define EMAC_MAILBOX_10 0x161c +#define EMAC_ATHR_HEADER_CTRL 0x1620 +#define EMAC_RXMAC_STATC_REG0 0x1700 +#define 
EMAC_RXMAC_STATC_REG22 0x1758 +#define EMAC_TXMAC_STATC_REG0 0x1760 +#define EMAC_TXMAC_STATC_REG24 0x17c0 +#define EMAC_CLK_GATE_CTRL 0x1814 +#define EMAC_CORE_HW_VERSION 0x1974 +#define EMAC_MISC_CTRL 0x1990 +#define EMAC_MAILBOX_7 0x19e0 +#define EMAC_MAILBOX_8 0x19e4 +#define EMAC_IDT_TABLE0 0x1b00 +#define EMAC_RXMAC_STATC_REG23 0x1bc8 +#define EMAC_RXMAC_STATC_REG24 0x1bcc +#define EMAC_TXMAC_STATC_REG25 0x1bd0 +#define EMAC_MAILBOX_15 0x1bd4 +#define EMAC_MAILBOX_16 0x1bd8 +#define EMAC_INT1_MASK 0x1bf0 +#define EMAC_INT1_STATUS 0x1bf4 +#define EMAC_INT2_MASK 0x1bf8 +#define EMAC_INT2_STATUS 0x1bfc +#define EMAC_INT3_MASK 0x1c00 +#define EMAC_INT3_STATUS 0x1c04 /* EMAC_DMA_MAS_CTRL */ #define DEV_ID_NUM_BMSK 0x7f000000 @@ -166,10 +217,6 @@ enum emac_clk_id { #define EMAC_MAX_SETUP_LNK_CYCLE 100 -/* Wake On Lan */ -#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */ -#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */ - struct emac_stats { /* rx */ u64 rx_ok; /* good packets */ @@ -291,7 +338,7 @@ struct emac_adapter { void __iomem *base; void __iomem *csr; - struct emac_phy phy; + struct emac_sgmii phy; struct emac_stats stats; struct emac_irq irq; @@ -309,6 +356,13 @@ struct emac_adapter { unsigned int rxbuf_size; + /* Flow control / pause frames support. If automatic=True, do whatever + * the PHY does. Otherwise, use tx_flow_control and rx_flow_control. + */ + bool automatic; + bool tx_flow_control; + bool rx_flow_control; + /* Ring parameter */ u8 tpd_burst; u8 rfd_burst; @@ -330,6 +384,8 @@ struct emac_adapter { int emac_reinit_locked(struct emac_adapter *adpt); void emac_reg_update32(void __iomem *addr, u32 mask, u32 val); -irqreturn_t emac_isr(int irq, void *data); + +void emac_set_ethtool_ops(struct net_device *netdev); +void emac_update_hw_stats(struct emac_adapter *adpt); #endif /* _EMAC_H_ */ diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 8e28234dddad..d145df98feff 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -188,14 +188,16 @@ qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p) } static int -qcaspi_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +qcaspi_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->transceiver = XCVR_INTERNAL; - cmd->supported = SUPPORTED_10baseT_Half; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_HALF; - cmd->port = PORT_OTHER; - cmd->autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; + cmd->base.port = PORT_OTHER; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -295,7 +297,6 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) static const struct ethtool_ops qcaspi_ethtool_ops = { .get_drvinfo = qcaspi_get_drvinfo, .get_link = ethtool_op_get_link, - .get_settings = qcaspi_get_settings, .get_ethtool_stats = qcaspi_get_ethtool_stats, .get_strings = qcaspi_get_strings, .get_sset_count = qcaspi_get_sset_count, @@ -303,6 +304,7 @@ static const struct ethtool_ops qcaspi_ethtool_ops = { .get_regs = qcaspi_get_regs, .get_ringparam = qcaspi_get_ringparam, .set_ringparam = qcaspi_set_ringparam, + .get_link_ksettings = qcaspi_get_link_ksettings, }; void qcaspi_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/realtek/8139cp.c 
b/drivers/net/ethernet/realtek/8139cp.c index 0b3cd58093d5..672f6b696069 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -465,10 +465,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) struct cp_private *cp = container_of(napi, struct cp_private, napi); struct net_device *dev = cp->dev; unsigned int rx_tail = cp->rx_tail; - int rx; + int rx = 0; - rx = 0; -rx_status_loop: cpw16(IntrStatus, cp_rx_intr_mask); while (rx < budget) { @@ -556,15 +554,10 @@ rx_next: /* if we did not reach work limit, then we're done with * this round of polling */ - if (rx < budget) { + if (rx < budget && napi_complete_done(napi, rx)) { unsigned long flags; - if (cpr16(IntrStatus) & cp_rx_intr_mask) - goto rx_status_loop; - - napi_gro_flush(napi, false); spin_lock_irqsave(&cp->lock, flags); - __napi_complete(napi); cpw16_f(IntrMask, cp_intr_mask); spin_unlock_irqrestore(&cp->lock, flags); } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 9bc047ac883b..89631753e799 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -653,9 +653,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget); static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 - *stats); +static void rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static void rtl8139_set_rx_mode (struct net_device *dev); static void __set_rx_mode (struct net_device *dev); static void rtl8139_hw_start (struct net_device *dev); @@ -2136,14 +2135,10 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) if (likely(RTL_R16(IntrStatus) & RxAckBits)) work_done += rtl8139_rx(dev, tp, budget); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - /* - * Order is important since data can get interrupted - * again when we think we are done. - */ + spin_lock_irqsave(&tp->lock, flags); - __napi_complete(napi); RTL_W16_F(IntrMask, rtl8139_intr_mask); spin_unlock_irqrestore(&tp->lock, flags); } @@ -2516,7 +2511,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } -static struct rtnl_link_stats64 * +static void rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8139_private *tp = netdev_priv(dev); @@ -2544,8 +2539,6 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); - - return stats; } /* Set or clear the multicast filter for this adaptor. diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 570ed3bd3cbf..9bcd4aefc9c5 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -170,7 +170,7 @@ struct net_local { spinlock_t lock; struct net_device *next_module; struct timer_list timer; /* Media selection timer. */ - long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ + unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; unsigned int tx_unit_busy:1; unsigned char re_tx, /* Number of packet retransmissions. 
*/ @@ -668,11 +668,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) } num_tx_since_rx++; } else if (num_tx_since_rx > 8 && - time_after(jiffies, dev->last_rx + HZ)) { + time_after(jiffies, lp->last_rx_time + HZ)) { if (net_debug > 2) printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and " "%ld jiffies status %02x CMR1 %02x.\n", dev->name, - num_tx_since_rx, jiffies - dev->last_rx, status, + num_tx_since_rx, jiffies - lp->last_rx_time, status, (read_nibble(ioaddr, CMR1) >> 3) & 15); dev->stats.rx_missed_errors++; hardware_init(dev); @@ -789,7 +789,6 @@ static void net_rx(struct net_device *dev) read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8f1623bf2134..81f18a833527 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7583,7 +7583,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); rtl_irq_enable(tp, enable_mask); mmiowb(); @@ -7755,7 +7755,7 @@ err_pm_runtime_put: goto out; } -static struct rtnl_link_stats64 * +static void rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); @@ -7809,8 +7809,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) le16_to_cpu(tp->tc_offset.tx_aborted); pm_runtime_put_noidle(&pdev->dev); - - return stats; } static void rtl8169_net_suspend(struct net_device *dev) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index f1109661a533..0525bd696d5d 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -76,6 +76,7 @@ enum ravb_reg { CDAR20 = 0x0060, CDAR21 = 0x0064, ESR = 0x0088, + APSR = 0x008C, /* R-Car Gen3 only */ RCR = 0x0090, RQC0 = 0x0094, RQC1 = 0x0098, @@ -248,6 +249,15 @@ enum ESR_BIT { ESR_EIL = 0x00001000, }; +/* APSR */ +enum APSR_BIT { + APSR_MEMS = 0x00000002, + APSR_CMSW = 0x00000010, + APSR_DM = 0x00006000, /* Undocumented? */ + APSR_DM_RDM = 0x00002000, + APSR_DM_TDM = 0x00004000, +}; + /* RCR */ enum RCR_BIT { RCR_EFFS = 0x00000001, diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 301f48755093..8cfc4a54f2dc 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -31,6 +31,7 @@ #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/sys_soc.h> #include <asm/div64.h> @@ -988,6 +989,11 @@ static void ravb_adjust_link(struct net_device *ndev) phy_print_status(phydev); } +static const struct soc_device_attribute r8a7795es10[] = { + { .soc_id = "r8a7795", .revision = "ES1.0", }, + { /* sentinel */ } +}; + /* PHY init function */ static int ravb_phy_init(struct net_device *ndev) { @@ -1023,10 +1029,10 @@ static int ravb_phy_init(struct net_device *ndev) goto err_deregister_fixed_link; } - /* This driver only support 10/100Mbit speeds on Gen3 + /* This driver only support 10/100Mbit speeds on R-Car H3 ES1.0 * at this time. 
*/ - if (priv->chip_id == RCAR_GEN3) { + if (soc_device_match(r8a7795es10)) { err = phy_set_max_speed(phydev, SPEED_100); if (err) { netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); @@ -1920,6 +1926,23 @@ static void ravb_set_config_mode(struct net_device *ndev) } } +/* Set tx and rx clock internal delay modes */ +static void ravb_set_delay_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int set = 0; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) + set |= APSR_DM_RDM; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) + set |= APSR_DM_TDM; + + ravb_modify(ndev, APSR, APSR_DM, set); +} + static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -2032,6 +2055,9 @@ static int ravb_probe(struct platform_device *pdev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, @@ -2168,6 +2194,9 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Restore descriptor base address table */ ravb_write(ndev, priv->desc_bat_dma, DBAT); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f729a6b43958..54248775f227 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1,9 +1,9 @@ /* SuperH Ethernet device driver * - * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. - * Copyright (C) 2013-2016 Cogent Embedded, Inc. + * Copyright (C) 2013-2017 Cogent Embedded, Inc. 
* Copyright (C) 2014 Codethink Limited * * This program is free software; you can redistribute it and/or modify it @@ -518,12 +518,19 @@ static struct sh_eth_cpu_data r7s72100_data = { .ecsr_value = ECSR_ICD, .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xe77f009f, + .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP | + EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP | + EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000070f, .no_psr = 1, @@ -535,9 +542,8 @@ static struct sh_eth_cpu_data r7s72100_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, - .hw_crc = 1, + .hw_checksum = 1, .tsu = 1, - .shift_rd0 = 1, }; static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) @@ -557,12 +563,19 @@ static struct sh_eth_cpu_data r8a7740_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000070f, .apr = 1, @@ -574,10 +587,10 @@ static struct sh_eth_cpu_data r8a7740_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, - .hw_crc = 1, + .hw_checksum = 1, .tsu = 1, .select_mii = 1, - .shift_rd0 = 1, + .magic = 1, }; /* There is CPU dependent code */ @@ -604,12 +617,16 @@ static struct sh_eth_cpu_data r8a777x_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, .apr = 1, @@ -625,14 +642,19 @@ static struct sh_eth_cpu_data r8a779x_data = { .register_type = SH_ETH_REG_FAST_RCAR, - .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, - .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | 
EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, .trscer_err_mask = DESC_I_RINT8, @@ -642,6 +664,7 @@ static struct sh_eth_cpu_data r8a779x_data = { .tpauser = 1, .hw_swap = 1, .rmiimode = 1, + .magic = 1, }; #endif /* CONFIG_OF */ @@ -668,12 +691,16 @@ static struct sh_eth_cpu_data sh7724_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .apr = 1, .mpr = 1, @@ -704,12 +731,18 @@ static struct sh_eth_cpu_data sh7757_data = { .register_type = SH_ETH_REG_FAST_SH4, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .irq_flags = IRQF_SHARED, .apr = 1, @@ -772,12 +805,19 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000072f, .irq_flags = IRQF_SHARED, @@ -803,12 +843,18 @@ static struct sh_eth_cpu_data sh7734_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .apr = 1, .mpr = 1, @@ -818,9 +864,9 @@ static struct sh_eth_cpu_data sh7734_data = 
{ .no_trimd = 1, .no_ade = 1, .tsu = 1, - .hw_crc = 1, + .hw_checksum = 1, .select_mii = 1, - .shift_rd0 = 1, + .magic = 1, }; /* SH7763 */ @@ -833,12 +879,17 @@ static struct sh_eth_cpu_data sh7763_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .apr = 1, .mpr = 1, @@ -849,12 +900,20 @@ static struct sh_eth_cpu_data sh7763_data = { .no_ade = 1, .tsu = 1, .irq_flags = IRQF_SHARED, + .magic = 1, }; static struct sh_eth_cpu_data sh7619_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .apr = 1, .mpr = 1, @@ -865,7 +924,14 @@ static struct sh_eth_cpu_data sh7619_data = { static struct sh_eth_cpu_data sh771x_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tsu = 1, }; @@ -936,7 +1002,7 @@ static int sh_eth_reset(struct net_device *ndev) sh_eth_write(ndev, 0x0, RDFFR); /* Reset HW CRC register */ - if (mdp->cd->hw_crc) + if (mdp->cd->hw_checksum) sh_eth_write(ndev, 0x0, CSMR); /* Select MII mode */ @@ -1421,7 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. 
*/ - if (mdp->cd->shift_rd0) + if (mdp->cd->hw_checksum) desc_status >>= 16; skb = mdp->rx_skbuff[entry]; @@ -1528,44 +1594,46 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev) sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } -/* error control function */ -static void sh_eth_error(struct net_device *ndev, u32 intr_status) +/* E-MAC interrupt handler */ +static void sh_eth_emac_interrupt(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); u32 felic_stat; u32 link_stat; - u32 mask; - if (intr_status & EESR_ECI) { - felic_stat = sh_eth_read(ndev, ECSR); - sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ - if (felic_stat & ECSR_ICD) - ndev->stats.tx_carrier_errors++; - if (felic_stat & ECSR_LCHNG) { - /* Link Changed */ - if (mdp->cd->no_psr || mdp->no_ether_link) { - goto ignore_link; - } else { - link_stat = (sh_eth_read(ndev, PSR)); - if (mdp->ether_link_active_low) - link_stat = ~link_stat; - } - if (!(link_stat & PHY_ST_LINK)) { - sh_eth_rcv_snd_disable(ndev); - } else { - /* Link Up */ - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); - /* clear int */ - sh_eth_modify(ndev, ECSR, 0, 0); - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, - DMAC_M_ECI); - /* enable tx and rx */ - sh_eth_rcv_snd_enable(ndev); - } + felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ + if (felic_stat & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (felic_stat & ECSR_MPD) + pm_wakeup_event(&mdp->pdev->dev, 0); + if (felic_stat & ECSR_LCHNG) { + /* Link Changed */ + if (mdp->cd->no_psr || mdp->no_ether_link) + return; + link_stat = sh_eth_read(ndev, PSR); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; + if (!(link_stat & PHY_ST_LINK)) { + sh_eth_rcv_snd_disable(ndev); + } else { + /* Link Up */ + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0); + /* clear int */ + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP); + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); } } +} + +/* error control function */ +static void sh_eth_error(struct net_device *ndev, u32 intr_status) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 mask; -ignore_link: if (intr_status & EESR_TWB) { /* Unused write back interrupt */ if (intr_status & EESR_TABT) { /* Transmit Abort int */ @@ -1646,14 +1714,16 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) /* Get interrupt status */ intr_status = sh_eth_read(ndev, EESR); - /* Mask it with the interrupt mask, forcing ECI interrupt to be always - * enabled since it's the one that comes thru regardless of the mask, - * and we need to fully handle it in sh_eth_error() in order to quench - * it as it doesn't get cleared by just writing 1 to the ECI bit... + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_emac_interrupt() in order + * to quench it as it doesn't get cleared by just writing 1 to the ECI + * bit... 
*/ intr_enable = sh_eth_read(ndev, EESIPR); - intr_status &= intr_enable | DMAC_M_ECI; - if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) + intr_status &= intr_enable | EESIPR_ECIIP; + if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | + cd->eesr_err_check)) ret = IRQ_HANDLED; else goto out; @@ -1685,6 +1755,10 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) netif_wake_queue(ndev); } + /* E-MAC interrupt */ + if (intr_status & EESR_ECI) + sh_eth_emac_interrupt(ndev); + if (intr_status & cd->eesr_err_check) { /* Clear error interrupts */ sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); @@ -1989,7 +2063,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) add_reg(MAFCR); if (cd->rtrate) add_reg(RTRATE); - if (cd->hw_crc) + if (cd->hw_checksum) add_reg(CSMR); if (cd->select_mii) add_reg(RMII_MII); @@ -2201,6 +2275,33 @@ static int sh_eth_set_ringparam(struct net_device *ndev, return 0; } +static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (mdp->cd->magic && mdp->clk) { + wol->supported = WAKE_MAGIC; + wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); + + return 0; +} + static const struct ethtool_ops sh_eth_ethtool_ops = { .get_regs_len = sh_eth_get_regs_len, .get_regs = sh_eth_get_regs, @@ -2215,6 +2316,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { .set_ringparam = sh_eth_set_ringparam, .get_link_ksettings = sh_eth_get_link_ksettings, .set_link_ksettings = sh_eth_set_link_ksettings, + .get_wol = sh_eth_get_wol, + .set_wol = sh_eth_set_wol, }; /* network device open function */ @@ -3017,6 +3120,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } + /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ + mdp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mdp->clk)) + mdp->clk = NULL; + ndev->base_addr = res->start; spin_lock_init(&mdp->lock); @@ -3111,6 +3219,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (ret) goto out_napi_del; + if (mdp->cd->magic && mdp->clk) + device_set_wakeup_capable(&pdev->dev, 1); + /* print device information */ netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); @@ -3150,15 +3261,67 @@ static int sh_eth_drv_remove(struct platform_device *pdev) #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP +static int sh_eth_wol_setup(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* Only allow ECI interrupts */ + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); + sh_eth_write(ndev, EESIPR_ECIIP, EESIPR); + + /* Enable MagicPacket */ + sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + /* Increased clock usage so device won't be suspended */ + clk_enable(mdp->clk); + + return enable_irq_wake(ndev->irq); +} + +static int sh_eth_wol_restore(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + napi_enable(&mdp->napi); + + /* Disable MagicPacket */ + sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0); + + /* The device needs to be 
reset to restore MagicPacket logic + * for next wakeup. If we close and open the device it will + * both be reset and all registers restored. This is what + * happens during suspend and resume without WoL enabled. + */ + ret = sh_eth_close(ndev); + if (ret < 0) + return ret; + ret = sh_eth_open(ndev); + if (ret < 0) + return ret; + + /* Restore clock usage count */ + clk_disable(mdp->clk); + + return disable_irq_wake(ndev->irq); +} + static int sh_eth_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); int ret = 0; - if (netif_running(ndev)) { - netif_device_detach(ndev); + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (mdp->wol_enabled) + ret = sh_eth_wol_setup(ndev); + else ret = sh_eth_close(ndev); - } return ret; } @@ -3166,14 +3329,21 @@ static int sh_eth_suspend(struct device *dev) static int sh_eth_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); int ret = 0; - if (netif_running(ndev)) { + if (!netif_running(ndev)) + return 0; + + if (mdp->wol_enabled) + ret = sh_eth_wol_restore(ndev); + else ret = sh_eth_open(ndev); - if (ret < 0) - return ret; - netif_device_attach(ndev); - } + + if (ret < 0) + return ret; + + netif_device_attach(ndev); return ret; } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index d050f37f3e0f..a6753ccba711 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -265,22 +265,38 @@ enum EESR_BIT { EESR_RTO) #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ - EESR_TFE | EESR_TDE | EESR_ECI) + EESR_TFE | EESR_TDE) /* EESIPR */ -enum DMAC_IM_BIT { - DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000, - DMAC_M_RABT = 0x02000000, - DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000, - DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000, - DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000, - DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000, - DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800, - DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200, - DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080, - DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008, - DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002, - DMAC_M_RINT1 = 0x00000001, +enum EESIPR_BIT { + EESIPR_TWB1IP = 0x80000000, + EESIPR_TWBIP = 0x40000000, /* same as TWB0IP */ + EESIPR_TC1IP = 0x20000000, + EESIPR_TUCIP = 0x10000000, + EESIPR_ROCIP = 0x08000000, + EESIPR_TABTIP = 0x04000000, + EESIPR_RABTIP = 0x02000000, + EESIPR_RFCOFIP = 0x01000000, + EESIPR_ADEIP = 0x00800000, + EESIPR_ECIIP = 0x00400000, + EESIPR_FTCIP = 0x00200000, /* same as TC0IP */ + EESIPR_TDEIP = 0x00100000, + EESIPR_TFUFIP = 0x00080000, + EESIPR_FRIP = 0x00040000, + EESIPR_RDEIP = 0x00020000, + EESIPR_RFOFIP = 0x00010000, + EESIPR_CNDIP = 0x00000800, + EESIPR_DLCIP = 0x00000400, + EESIPR_CDIP = 0x00000200, + EESIPR_TROIP = 0x00000100, + EESIPR_RMAFIP = 0x00000080, + EESIPR_CEEFIP = 0x00000040, + EESIPR_CELFIP = 0x00000020, + EESIPR_RRFIP = 0x00000010, + EESIPR_RTLFIP = 0x00000008, + EESIPR_RTSFIP = 0x00000004, + EESIPR_PREIP = 0x00000002, + EESIPR_CERFIP = 0x00000001, }; /* Receive descriptor 0 bits */ @@ -339,7 +355,7 @@ enum FELIC_MODE_BIT { ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000, ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, ECMR_TXF = 0x00010000, ECMR_MCT = 
0x00002000, ECMR_PRCEF = 0x00001000, - ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, + ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001, }; @@ -488,11 +504,11 @@ struct sh_eth_cpu_data { unsigned rpadir:1; /* E-DMAC have RPADIR */ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ - unsigned hw_crc:1; /* E-DMAC have CSMR */ + unsigned hw_checksum:1; /* E-DMAC has CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ - unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ + unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ }; struct sh_eth_private { @@ -501,6 +517,7 @@ struct sh_eth_private { const u16 *reg_offset; void __iomem *addr; void __iomem *tsu_addr; + struct clk *clk; u32 num_rx_ring; u32 num_tx_ring; dma_addr_t rx_desc_dma; @@ -529,6 +546,7 @@ struct sh_eth_private { unsigned no_ether_link:1; unsigned ether_link_active_low:1; unsigned is_opened:1; + unsigned wol_enabled:1; }; static inline void sh_eth_soft_swap(char *src, int len) diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 7c450b5a1138..0f63a44a955d 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2517,7 +2517,7 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget) } if (credits < budget) - napi_complete(napi); + napi_complete_done(napi, credits); rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index cddcff5a00a7..d54490d3f7ad 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1563,7 +1563,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget) work_done = sxgbe_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); } @@ -1706,11 +1706,9 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi) * This function is a driver entry point whenever ifconfig command gets * executed to see device statistics. Statistics are number of * bytes sent or received, errors occurred etc. - * Return value: - * This function returns various statistical information of device. */ -static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void sxgbe_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct sxgbe_priv_data *priv = netdev_priv(dev); void __iomem *ioaddr = priv->ioaddr; @@ -1761,8 +1759,6 @@ static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, SXGBE_MMC_TXUFLWHI_GBCNT_REG); writel(0, ioaddr + SXGBE_MMC_CTL_REG); spin_unlock(&priv->stats_lock); - - return stats; } /* sxgbe_set_features - entry point to set offload features of the device. 
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index ed34196028b8..70347720fdf9 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -807,7 +807,7 @@ err_out: return err; } -static int __exit sgiseeq_remove(struct platform_device *pdev) +static int sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); @@ -822,7 +822,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev) static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, - .remove = __exit_p(sgiseeq_remove), + .remove = sgiseeq_remove, .driver = { .name = "sgiseeq", } diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h index 17d83f37fbf2..41ad07d45144 100644 --- a/drivers/net/ethernet/sfc/bitfield.h +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -433,6 +433,9 @@ typedef union efx_oword { (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ } while (0) +#define EFX_AND_QWORD(qword, from, mask) \ + (qword).u64[0] = (from).u64[0] & (mask).u64[0] + #define EFX_OR_OWORD(oword, from, mask) \ do { \ (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 5eb0e684fd76..c60c2d4c646a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -60,15 +60,33 @@ struct efx_ef10_vlan { u16 vid; }; +enum efx_ef10_default_filters { + EFX_EF10_BCAST, + EFX_EF10_UCDEF, + EFX_EF10_MCDEF, + EFX_EF10_VXLAN4_UCDEF, + EFX_EF10_VXLAN4_MCDEF, + EFX_EF10_VXLAN6_UCDEF, + EFX_EF10_VXLAN6_MCDEF, + EFX_EF10_NVGRE4_UCDEF, + EFX_EF10_NVGRE4_MCDEF, + EFX_EF10_NVGRE6_UCDEF, + EFX_EF10_NVGRE6_MCDEF, + EFX_EF10_GENEVE4_UCDEF, + EFX_EF10_GENEVE4_MCDEF, + EFX_EF10_GENEVE6_UCDEF, + EFX_EF10_GENEVE6_MCDEF, + + EFX_EF10_NUM_DEFAULT_FILTERS +}; + /* Per-VLAN filters information */ struct efx_ef10_filter_vlan { struct list_head list; u16 vid; u16 uc[EFX_EF10_FILTER_DEV_UC_MAX]; u16 mc[EFX_EF10_FILTER_DEV_MC_MAX]; - u16 ucdef; - u16 bcast; - u16 mcdef; + u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS]; }; struct efx_ef10_dev_addr { @@ -78,7 +96,7 @@ struct efx_ef10_dev_addr { struct efx_ef10_filter_table { /* The MCDI match masks supported by this fw & hw, in order of priority */ u32 rx_match_mcdi_flags[ - MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; unsigned int rx_match_count; struct { @@ -114,6 +132,23 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid); static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, struct efx_ef10_filter_vlan *vlan); static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid); +static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); + +static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id) +{ + WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID); + return filter_id & (HUNT_FILTER_TBL_ROWS - 1); +} + +static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id) +{ + return filter_id / (HUNT_FILTER_TBL_ROWS * 2); +} + +static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx) +{ + return pri * HUNT_FILTER_TBL_ROWS * 2 + idx; +} static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) { @@ -197,11 +232,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) nic_data->datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); - if (outlen >= 
MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) + if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { nic_data->datapath_caps2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2); - else + nic_data->piobuf_size = MCDI_WORD(outbuf, + GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); + } else { nic_data->datapath_caps2 = 0; + nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; + } /* record the DPCPU firmware IDs to determine VEB vswitching support. */ @@ -547,7 +586,6 @@ static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); static int efx_ef10_probe(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data; - struct net_device *net_dev = efx->net_dev; int i, rc; /* We can have one VI for each 8K region. However, until we @@ -603,6 +641,8 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail2; + mutex_init(&nic_data->udp_tunnels_lock); + /* Reset (most) configuration for this function */ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); if (rc) @@ -637,7 +677,6 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc < 0) goto fail5; efx->port_num = rc; - net_dev->dev_port = rc; rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); if (rc) @@ -692,6 +731,14 @@ fail5: fail4: device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); fail3: + efx_mcdi_detach(efx); + + mutex_lock(&nic_data->udp_tunnels_lock); + memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); + (void)efx_ef10_set_udp_tnl_ports(efx, true); + mutex_unlock(&nic_data->udp_tunnels_lock); + mutex_destroy(&nic_data->udp_tunnels_lock); + efx_mcdi_fini(efx); fail2: efx_nic_free_buffer(efx, &nic_data->mcdi_buf); @@ -781,9 +828,7 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) static int efx_ef10_link_piobufs(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - _MCDI_DECLARE_BUF(inbuf, - max(MC_CMD_LINK_PIOBUF_IN_LEN, - MC_CMD_UNLINK_PIOBUF_IN_LEN)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned int offset, index; @@ -792,8 +837,6 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); - memset(inbuf, 0, sizeof(inbuf)); - /* Link a buffer to each VI in the write-combining mapping */ for (index = 0; index < nic_data->n_piobufs; ++index) { MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, @@ -825,8 +868,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) offset = ((efx->tx_channel_offset + efx->n_tx_channels - tx_queue->channel->channel - 1) * efx_piobuf_size); - index = offset / ER_DZ_TX_PIOBUF_SIZE; - offset = offset % ER_DZ_TX_PIOBUF_SIZE; + index = offset / nic_data->piobuf_size; + offset = offset % nic_data->piobuf_size; /* When the host page size is 4K, the first * host page in the WC mapping may be within @@ -873,6 +916,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) return 0; fail: + /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same + * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. 
+ */ + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); while (index--) { MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, nic_data->pio_write_vi_base + index); @@ -961,6 +1008,15 @@ static void efx_ef10_remove(struct efx_nic *efx) device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); + efx_mcdi_detach(efx); + + memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); + mutex_lock(&nic_data->udp_tunnels_lock); + (void)efx_ef10_set_udp_tnl_ports(efx, true); + mutex_unlock(&nic_data->udp_tunnels_lock); + + mutex_destroy(&nic_data->udp_tunnels_lock); + efx_mcdi_fini(efx); efx_nic_free_buffer(efx, &nic_data->mcdi_buf); kfree(nic_data); @@ -1161,14 +1217,20 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) * functions of the controller. */ if (efx_piobuf_size != 0 && - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= + nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= efx->n_tx_channels) { unsigned int n_piobufs = DIV_ROUND_UP(efx->n_tx_channels, - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); + nic_data->piobuf_size / efx_piobuf_size); rc = efx_ef10_alloc_piobufs(efx, n_piobufs); - if (rc) + if (rc == -ENOSPC) + netif_dbg(efx, probe, efx->net_dev, + "out of PIO buffers; cannot allocate more\n"); + else if (rc == -EPERM) + netif_dbg(efx, probe, efx->net_dev, + "not permitted to allocate PIO buffers\n"); + else if (rc) netif_err(efx, probe, efx->net_dev, "failed to allocate PIO buffers (%d)\n", rc); else @@ -1315,15 +1377,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx) efx_ef10_free_piobufs(efx); } - /* Log an error on failure, but this is non-fatal */ - if (rc) + /* Log an error on failure, but this is non-fatal. + * Permission errors are less important - we've presumably + * had the PIO buffer licence removed. + */ + if (rc == -EPERM) + netif_dbg(efx, drv, efx->net_dev, + "not permitted to restore PIO buffers\n"); + else if (rc) netif_err(efx, drv, efx->net_dev, "failed to restore PIO buffers (%d)\n", rc); nic_data->must_restore_piobufs = false; } /* don't fail init if RSS setup doesn't work */ - rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); + rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = (rc == 0); return 0; @@ -2115,7 +2183,7 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, /* Modify IPv4 header if needed. */ ip->tot_len = 0; ip->check = 0; - ipv4_id = ip->id; + ipv4_id = ntohs(ip->id); } else { /* Modify IPv6 header if needed. 
*/ struct ipv6hdr *ipv6 = ipv6_hdr(skb); @@ -2360,7 +2428,11 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) /* Create TX descriptor ring entry */ if (buffer->flags & EFX_TX_BUF_OPTION) { *txd = buffer->option; + if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) + /* PIO descriptor */ + tx_queue->packet_write_count = tx_queue->write_count; } else { + tx_queue->packet_write_count = tx_queue->write_count; BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); EFX_POPULATE_QWORD_3( *txd, @@ -2529,7 +2601,7 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) } static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); @@ -2540,6 +2612,11 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); + /* This iterates over the length of efx->rx_indir_table, but copies + * bytes from rx_indir_table. That's because the latter is a pointer + * rather than an array, but should have the same length. + * The efx->rx_hash_key loop below is similar. + */ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) MCDI_PTR(tablebuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = @@ -2555,8 +2632,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) - MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = - efx->rx_hash_key[i]; + MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, sizeof(keybuf), NULL, 0, NULL); @@ -2589,7 +2665,8 @@ static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, } static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; @@ -2608,7 +2685,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, } rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, - rx_indir_table); + rx_indir_table, key); if (rc != 0) goto fail2; @@ -2619,6 +2696,9 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, if (rx_indir_table != efx->rx_indir_table) memcpy(efx->rx_indir_table, rx_indir_table, sizeof(efx->rx_indir_table)); + if (key != efx->rx_hash_key) + memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); + return 0; fail2: @@ -2629,15 +2709,69 @@ fail1: return rc; } +static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); + size_t outlen; + int rc, i; + + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != + MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); + + if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) + return -ENOENT; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); + rc = 
efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), + tablebuf, sizeof(tablebuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = MCDI_PTR(tablebuf, + RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), + keybuf, sizeof(keybuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) + efx->rx_hash_key[i] = MCDI_PTR( + keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; + + return 0; +} + static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { int rc; if (efx->rss_spread == 1) return 0; - rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); + if (!key) + key = efx->rx_hash_key; + + rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); if (rc == -ENOBUFS && !user) { unsigned context_size; @@ -2675,6 +2809,8 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table + __attribute__ ((unused)), + const u8 *key __attribute__ ((unused))) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -3054,13 +3190,103 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; } +static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, + unsigned int n_packets, + unsigned int rx_encap_hdr, + unsigned int rx_l3_class, + unsigned int rx_l4_class, + const efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { + if (!efx->loopback_selftest) + channel->n_rx_eth_crc_err += n_packets; + return EFX_RX_PKT_DISCARD; + } + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && + rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) + netdev_WARN(efx->net_dev, + "invalid class for RX_IPCKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + *(rx_encap_hdr ? + &channel->n_rx_outer_ip_hdr_chksum_err : + &channel->n_rx_ip_hdr_chksum_err) += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && + ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || + (rx_l4_class != ESE_DZ_L4_CLASS_TCP && + rx_l4_class != ESE_DZ_L4_CLASS_UDP)))) + netdev_WARN(efx->net_dev, + "invalid class for RX_TCPUDP_CKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + *(rx_encap_hdr ? 
+ &channel->n_rx_outer_tcp_udp_chksum_err : + &channel->n_rx_tcp_udp_chksum_err) += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { + if (unlikely(!rx_encap_hdr)) + netdev_WARN(efx->net_dev, + "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) + netdev_WARN(efx->net_dev, + "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + channel->n_rx_inner_ip_hdr_chksum_err += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { + if (unlikely(!rx_encap_hdr)) + netdev_WARN(efx->net_dev, + "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || + (rx_l4_class != ESE_DZ_L4_CLASS_TCP && + rx_l4_class != ESE_DZ_L4_CLASS_UDP))) + netdev_WARN(efx->net_dev, + "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + channel->n_rx_inner_tcp_udp_chksum_err += n_packets; + return 0; + } + + WARN_ON(1); /* No error bits were recognised */ + return 0; +} + static int efx_ef10_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) { - unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; + unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; + unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; unsigned int n_descs, n_packets, i; struct efx_nic *efx = channel->efx; + struct efx_ef10_nic_data *nic_data = efx->nic_data; struct efx_rx_queue *rx_queue; + efx_qword_t errors; bool rx_cont; u16 flags = 0; @@ -3071,8 +3297,14 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); + rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); + rx_encap_hdr = + nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 
+ EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : + ESE_EZ_ENCAP_HDR_NONE; if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" @@ -3132,17 +3364,38 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, n_packets = 1; } - if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) - flags |= EFX_RX_PKT_DISCARD; - - if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { - channel->n_rx_ip_hdr_chksum_err += n_packets; - } else if (unlikely(EFX_QWORD_FIELD(*event, - ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { - channel->n_rx_tcp_udp_chksum_err += n_packets; - } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || - rx_l4_class == ESE_DZ_L4_CLASS_UDP) { - flags |= EFX_RX_PKT_CSUMMED; + EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, + ESF_DZ_RX_IPCKSUM_ERR, 1, + ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, + ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, + ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); + EFX_AND_QWORD(errors, *event, errors); + if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { + flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, + rx_encap_hdr, + rx_l3_class, rx_l4_class, + event); + } else { + bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP || + rx_l4_class == ESE_DZ_L4_CLASS_UDP; + + switch (rx_encap_hdr) { + case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ + flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ + if (tcpudp) + flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ + break; + case ESE_EZ_ENCAP_HDR_GRE: + case ESE_EZ_ENCAP_HDR_NONE: + if (tcpudp) + flags |= EFX_RX_PKT_CSUMMED; + break; + default: + netdev_WARN(efx->net_dev, + "unknown encapsulation type: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + } } if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) @@ -3510,6 +3763,104 @@ efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, table->entry[filter_idx].spec = (unsigned long)spec | flags; } +static void +efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf) +{ + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); + u32 match_fields = 0, uc_match, mc_match; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_INSERT : + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); + + /* Convert match flags and values. Unlike almost + * everything else in MCDI, these fields are in + * network byte order. + */ +#define COPY_VALUE(value, mcdi_field) \ + do { \ + match_fields |= \ + 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN; \ + BUILD_BUG_ON( \ + MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ + sizeof(value)); \ + memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ + &value, sizeof(value)); \ + } while (0) +#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ + if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ + COPY_VALUE(spec->gen_field, mcdi_field); \ + } + /* Handle encap filters first. They will always be mismatch + * (unknown UC or MC) filters + */ + if (encap_type) { + /* ether_type and outer_ip_proto need to be variables + * because COPY_VALUE wants to memcpy them + */ + __be16 ether_type = + htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? 
+ ETH_P_IPV6 : ETH_P_IP); + u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; + u8 outer_ip_proto; + + switch (encap_type & EFX_ENCAP_TYPES_MASK) { + case EFX_ENCAP_TYPE_VXLAN: + vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; + /* fallthrough */ + case EFX_ENCAP_TYPE_GENEVE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_UDP; + COPY_VALUE(outer_ip_proto, IP_PROTO); + /* We always need to set the type field, even + * though we're not matching on the TNI. + */ + MCDI_POPULATE_DWORD_1(inbuf, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VNI_TYPE, + vni_type); + break; + case EFX_ENCAP_TYPE_NVGRE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_GRE; + COPY_VALUE(outer_ip_proto, IP_PROTO); + break; + default: + WARN_ON(1); + } + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + match_fields |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << mc_match : + 1 << uc_match; + COPY_FIELD(REM_HOST, rem_host, SRC_IP); + COPY_FIELD(LOC_HOST, loc_host, DST_IP); + COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); + COPY_FIELD(REM_PORT, rem_port, SRC_PORT); + COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); + COPY_FIELD(LOC_PORT, loc_port, DST_PORT); + COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); + COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); + COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); + COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); +#undef COPY_FIELD +#undef COPY_VALUE + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, + match_fields); +} + static void efx_ef10_filter_push_prep(struct efx_nic *efx, const struct efx_filter_spec *spec, efx_dword_t *inbuf, u64 handle, @@ -3518,7 +3869,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, struct efx_ef10_nic_data *nic_data = efx->nic_data; u32 flags = spec->flags; - memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); + memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); /* Remove RSS flag if we don't have an RSS context. */ if (flags & EFX_FILTER_FLAG_RX_RSS && @@ -3531,46 +3882,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_OP_REPLACE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); } else { - u32 match_fields = 0; - - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, - efx_ef10_filter_is_exclusive(spec) ? - MC_CMD_FILTER_OP_IN_OP_INSERT : - MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); - - /* Convert match flags and values. Unlike almost - * everything else in MCDI, these fields are in - * network byte order. - */ - if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) - match_fields |= - is_multicast_ether_addr(spec->loc_mac) ? 
- 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : - 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; -#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ - if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ - match_fields |= \ - 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN; \ - BUILD_BUG_ON( \ - MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ - sizeof(spec->gen_field)); \ - memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ - &spec->gen_field, sizeof(spec->gen_field)); \ - } - COPY_FIELD(REM_HOST, rem_host, SRC_IP); - COPY_FIELD(LOC_HOST, loc_host, DST_IP); - COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); - COPY_FIELD(REM_PORT, rem_port, SRC_PORT); - COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); - COPY_FIELD(LOC_PORT, loc_port, DST_PORT); - COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); - COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); - COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); - COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); -#undef COPY_FIELD - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, - match_fields); + efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); } MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); @@ -3599,8 +3911,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx, const struct efx_filter_spec *spec, u64 *handle, bool replacing) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); int rc; efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); @@ -3615,37 +3927,58 @@ static int efx_ef10_filter_push(struct efx_nic *efx, static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) { + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); unsigned int match_flags = spec->match_flags; + unsigned int uc_match, mc_match; u32 mcdi_flags = 0; +#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ + unsigned int old_match_flags = match_flags; \ + match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ + if (match_flags != old_match_flags) \ + mcdi_flags |= \ + (1 << ((encap) ? 
\ + MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ + mcdi_field ## _LBN : \ + MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ + mcdi_field ## _LBN)); \ + } + /* inner or outer based on encap type */ + MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); + MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); + /* always outer */ + MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); + MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); +#undef MAP_FILTER_TO_MCDI_FLAG + + /* special handling for encap type, and mismatch */ + if (encap_type) { + match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; + mcdi_flags |= + (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; mcdi_flags |= is_multicast_ether_addr(spec->loc_mac) ? - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) : - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); + 1 << mc_match : + 1 << uc_match; } -#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \ - unsigned int old_match_flags = match_flags; \ - match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ - if (match_flags != old_match_flags) \ - mcdi_flags |= \ - (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ - } - MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP); - MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP); - MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC); - MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT); - MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC); - MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT); - MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO); -#undef MAP_FILTER_TO_MCDI_FLAG - /* Did we map them all? 
*/ WARN_ON_ONCE(match_flags); @@ -3877,7 +4210,7 @@ found: /* If successful, return the inserted filter ID */ if (rc == 0) - rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; + rc = efx_ef10_make_filter_id(match_pri, ins_index); wake_up_all(&table->waitq); out_unlock: @@ -3900,7 +4233,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, unsigned int priority_mask, u32 filter_id, bool by_index) { - unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); struct efx_ef10_filter_table *table = efx->filter_state; MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_HANDLE_OFST + @@ -3927,7 +4260,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, if (!spec || (!by_index && efx_ef10_filter_pri(table, spec) != - filter_id / HUNT_FILTER_TBL_ROWS)) { + efx_ef10_filter_get_unsafe_pri(filter_id))) { rc = -ENOENT; goto out_unlock; } @@ -3976,13 +4309,18 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, table->entry[filter_idx].handle); - rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, - inbuf, sizeof(inbuf), NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), NULL, 0, NULL); spin_lock_bh(&efx->filter_lock); - if (rc == 0) { + if ((rc == 0) || (rc == -ENOENT)) { + /* Filter removed OK or didn't actually exist */ kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } else { + efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, + MC_CMD_FILTER_OP_IN_LEN, + NULL, 0, rc); } } @@ -4002,11 +4340,6 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, filter_id, false); } -static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) -{ - return filter_id % HUNT_FILTER_TBL_ROWS; -} - static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id) @@ -4020,7 +4353,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec) { - unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); struct efx_ef10_filter_table *table = efx->filter_state; const struct efx_filter_spec *saved_spec; int rc; @@ -4029,7 +4362,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); if (saved_spec && saved_spec->priority == priority && efx_ef10_filter_pri(table, saved_spec) == - filter_id / HUNT_FILTER_TBL_ROWS) { + efx_ef10_filter_get_unsafe_pri(filter_id)) { *spec = *saved_spec; rc = 0; } else { @@ -4081,7 +4414,7 @@ static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - return table->rx_match_count * HUNT_FILTER_TBL_ROWS; + return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2; } static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, @@ -4101,8 +4434,9 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, count = -EMSGSIZE; break; } - buf[count++] = (efx_ef10_filter_pri(table, spec) * - HUNT_FILTER_TBL_ROWS + + buf[count++] = + efx_ef10_make_filter_id( + efx_ef10_filter_pri(table, spec), filter_idx); } } @@ -4305,29 +4639,54 @@ efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, #endif /* CONFIG_RFS_ACCEL */ -static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) +static int 
efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) { int match_flags = 0; -#define MAP_FLAG(gen_flag, mcdi_field) { \ +#define MAP_FLAG(gen_flag, mcdi_field) do { \ u32 old_mcdi_flags = mcdi_flags; \ - mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ + mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ if (mcdi_flags != old_mcdi_flags) \ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ + } while (0) + + if (encap) { + /* encap filters must specify encap type */ + match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + /* and imply ethertype and ip proto */ + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + /* VLAN tags refer to the outer packet */ + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + /* everything else refers to the inner packet */ + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, IFRM_SRC_IP); + MAP_FLAG(LOC_HOST, IFRM_DST_IP); + MAP_FLAG(REM_MAC, IFRM_SRC_MAC); + MAP_FLAG(REM_PORT, IFRM_SRC_PORT); + MAP_FLAG(LOC_MAC, IFRM_DST_MAC); + MAP_FLAG(LOC_PORT, IFRM_DST_PORT); + MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); + MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); + } else { + MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, SRC_IP); + MAP_FLAG(LOC_HOST, DST_IP); + MAP_FLAG(REM_MAC, SRC_MAC); + MAP_FLAG(REM_PORT, SRC_PORT); + MAP_FLAG(LOC_MAC, DST_MAC); + MAP_FLAG(LOC_PORT, DST_PORT); + MAP_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(IP_PROTO, IP_PROTO); } - MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); - MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); - MAP_FLAG(REM_HOST, SRC_IP); - MAP_FLAG(LOC_HOST, DST_IP); - MAP_FLAG(REM_MAC, SRC_MAC); - MAP_FLAG(REM_PORT, SRC_PORT); - MAP_FLAG(LOC_MAC, DST_MAC); - MAP_FLAG(LOC_PORT, DST_PORT); - MAP_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FLAG(INNER_VID, INNER_VLAN); - MAP_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FLAG(IP_PROTO, IP_PROTO); #undef MAP_FLAG /* Did we map them all? 
*/ @@ -4354,6 +4713,7 @@ static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) } static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, + bool encap, enum efx_filter_match_flags match_flags) { unsigned int match_pri; @@ -4362,7 +4722,7 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, for (match_pri = 0; match_pri < table->rx_match_count; match_pri++) { - mf = efx_ef10_filter_match_flags_from_mcdi( + mf = efx_ef10_filter_match_flags_from_mcdi(encap, table->rx_match_mcdi_flags[match_pri]); if (mf == match_flags) return true; @@ -4371,39 +4731,30 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, return false; } -static int efx_ef10_filter_table_probe(struct efx_nic *efx) +static int +efx_ef10_filter_table_probe_matches(struct efx_nic *efx, + struct efx_ef10_filter_table *table, + bool encap) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct net_device *net_dev = efx->net_dev; unsigned int pd_match_pri, pd_match_count; - struct efx_ef10_filter_table *table; - struct efx_ef10_vlan *vlan; size_t outlen; int rc; - if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) - return -EINVAL; - - if (efx->filter_state) /* already probed */ - return 0; - - table = kzalloc(sizeof(*table), GFP_KERNEL); - if (!table) - return -ENOMEM; - /* Find out which RX filter types are supported, and their priorities */ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, + encap ? + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; + return rc; + pd_match_count = MCDI_VAR_ARRAY_LEN( outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); - table->rx_match_count = 0; for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { u32 mcdi_flags = @@ -4411,7 +4762,7 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) outbuf, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, pd_match_pri); - rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); + rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); if (rc < 0) { netif_dbg(efx, probe, efx->net_dev, "%s: fw flags %#x pri %u not supported in driver\n", @@ -4426,10 +4777,40 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) } } + return 0; +} + +static int efx_ef10_filter_table_probe(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + struct efx_ef10_filter_table *table; + struct efx_ef10_vlan *vlan; + int rc; + + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + if (efx->filter_state) /* already probed */ + return 0; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + table->rx_match_count = 0; + rc = efx_ef10_filter_table_probe_matches(efx, table, false); + if (rc) + goto fail; + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + rc = efx_ef10_filter_table_probe_matches(efx, table, true); + if (rc) + goto fail; if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && - !(efx_ef10_filter_match_supported(table, + !(efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && - 
efx_ef10_filter_match_supported(table, + efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { netif_info(efx, probe, net_dev, "VLAN filters are not supported in this firmware variant\n"); @@ -4475,10 +4856,13 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int invalid_filters = 0, failed = 0; + struct efx_ef10_filter_vlan *vlan; struct efx_filter_spec *spec; unsigned int filter_idx; - bool failed = false; - int rc; + u32 mcdi_flags; + int match_pri; + int rc, i; WARN_ON(!rwsem_is_locked(&efx->filter_sem)); @@ -4495,6 +4879,20 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) if (!spec) continue; + mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); + match_pri = 0; + while (match_pri < table->rx_match_count && + table->rx_match_mcdi_flags[match_pri] != mcdi_flags) + ++match_pri; + if (match_pri >= table->rx_match_count) { + invalid_filters++; + goto not_restored; + } + if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && + spec->rss_context != nic_data->rx_rss_context) + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with specific RSS context.\n"); + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; spin_unlock_bh(&efx->filter_lock); @@ -4502,10 +4900,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) &table->entry[filter_idx].handle, false); if (rc) - failed = true; - + failed++; spin_lock_bh(&efx->filter_lock); + if (rc) { +not_restored: + list_for_each_entry(vlan, &table->vlan_list, list) + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i) + if (vlan->default_filters[i] == filter_idx) + vlan->default_filters[i] = + EFX_EF10_FILTER_ID_INVALID; + kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); } else { @@ -4516,9 +4921,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) spin_unlock_bh(&efx->filter_lock); + /* This can happen validly if the MC's capabilities have changed, so + * is not an error. + */ + if (invalid_filters) + netif_dbg(efx, drv, efx->net_dev, + "Did not restore %u filters that are now unsupported.\n", + invalid_filters); + if (failed) netif_err(efx, hw, efx->net_dev, - "unable to restore all filters\n"); + "unable to restore %u filters\n", failed); else nic_data->must_restore_filters = false; } @@ -4575,7 +4988,7 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) unsigned int filter_idx; if (*id != EFX_EF10_FILTER_ID_INVALID) { - filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id); + filter_idx = efx_ef10_filter_get_unsafe_id(*id); if (!table->entry[filter_idx].spec) netif_dbg(efx, drv, efx->net_dev, "marked null spec old %04x:%04x\n", *id, @@ -4596,9 +5009,8 @@ static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); for (i = 0; i < table->dev_mc_count; i++) efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); - efx_ef10_filter_mark_one_old(efx, &vlan->ucdef); - efx_ef10_filter_mark_one_old(efx, &vlan->bcast); - efx_ef10_filter_mark_one_old(efx, &vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); } /* Mark old filters that may need to be removed. 
@@ -4711,11 +5123,13 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, rc = EFX_EF10_FILTER_ID_INVALID; } } - ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc); + ids[i] = efx_ef10_filter_get_unsafe_id(rc); } if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ + EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, vlan->vid, baddr); @@ -4732,9 +5146,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, } return rc; } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(rc); } } @@ -4743,6 +5156,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_ef10_filter_vlan *vlan, + enum efx_encap_type encap_type, bool multicast, bool rollback) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -4750,6 +5164,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; int rc; + u16 *id; filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; @@ -4760,19 +5175,75 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, else efx_filter_set_uc_def(&spec); + if (encap_type) { + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + efx_filter_set_encap_type(&spec, encap_type); + else + /* don't insert encap filters on non-supporting + * platforms. ID will be left as INVALID. + */ + return 0; + } + if (vlan->vid != EFX_FILTER_VID_UNSPEC) efx_filter_set_eth_local(&spec, vlan->vid, NULL); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, - efx->net_dev, - "%scast mismatch filter insert failed rc=%d\n", - multicast ? "Multi" : "Uni", rc); + const char *um = multicast ? "Multicast" : "Unicast"; + const char *encap_name = ""; + const char *encap_ipv = ""; + + if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_VXLAN) + encap_name = "VXLAN "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_NVGRE) + encap_name = "NVGRE "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_GENEVE) + encap_name = "GENEVE "; + if (encap_type & EFX_ENCAP_FLAG_IPV6) + encap_ipv = "IPv6 "; + else if (encap_type) + encap_ipv = "IPv4 "; + + /* unprivileged functions can't insert mismatch filters + * for encapsulated or unicast traffic, so downgrade + * those warnings to debug. 
+ */ + netif_cond_dbg(efx, drv, efx->net_dev, + rc == -EPERM && (encap_type || !multicast), warn, + "%s%s%s mismatch filter insert failed rc=%d\n", + encap_name, encap_ipv, um, rc); } else if (multicast) { - EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID); - vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc); - if (!nic_data->workaround_26807) { + /* mapping from encap types to default filter IDs (multicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_MCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_MCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_MCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = efx_ef10_filter_get_unsafe_id(rc); + if (!nic_data->workaround_26807 && !encap_type) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); @@ -4787,20 +5258,44 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, /* Roll back the mc_def filter */ efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - vlan->mcdef); - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + *id); + *id = EFX_EF10_FILTER_ID_INVALID; return rc; } } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID( + vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(rc); } } rc = 0; } else { - EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID); - vlan->ucdef = rc; + /* mapping from encap types to default filter IDs (unicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_UCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_UCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_UCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = rc; rc = 0; } return rc; @@ -4894,7 +5389,7 @@ restore_filters: if (rc2) goto reset_nic; - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return rc; @@ -4923,7 +5418,8 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Insert/renew unicast filters */ if (table->uc_promisc) { - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, + false, false); efx_ef10_filter_insert_addr_list(efx, vlan, false, false); } else { /* If any of 
the filters failed to insert, fall back to @@ -4931,8 +5427,25 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, * our individual unicast filters. */ if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + false, false); } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + false, false); /* Insert/renew multicast filters */ /* If changing promiscuous state with cascaded multicast filters, remove @@ -4946,7 +5459,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, rollback * and fall back to individual multicast filters */ - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) { + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) { /* Changing promisc state, so remove old filters */ efx_ef10_filter_remove_old(efx); efx_ef10_filter_insert_addr_list(efx, vlan, @@ -4956,7 +5471,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, don't * rollback. Regardless, also insert the mc_list */ - efx_ef10_filter_insert_def(efx, vlan, true, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, false); efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } else { @@ -4969,11 +5486,28 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Changing promisc state, so remove old filters */ if (nic_data->workaround_26807) efx_ef10_filter_remove_old(efx); - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + true, false); } /* Caller must hold efx->filter_sem for read if race against @@ -5060,9 +5594,8 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; - vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; - vlan->bcast = EFX_EF10_FILTER_ID_INVALID; - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; list_add_tail(&vlan->list, &table->vlan_list); @@ -5089,9 +5622,10 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) 
efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mc[i]); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->default_filters[i]); kfree(vlan); } @@ -5141,7 +5675,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) if (was_enabled) efx_net_open(efx->net_dev); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); #ifdef CONFIG_SFC_SRIOV if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { @@ -5540,6 +6074,20 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, } } +static int efx_ef10_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} + static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) { if (proto != htons(ETH_P_8021Q)) @@ -5556,6 +6104,271 @@ static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) return efx_ef10_del_vlan(efx, vid); } +/* We rely on the MCDI wiping out our TX rings if it made any changes to the + * ports table, ensuring that any TSO descriptors that were made on a now- + * removed tunnel port will be blown away and won't break things when we try + * to transmit them using the new ports table. + */ +static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); + bool will_reset = false; + size_t num_entries = 0; + size_t inlen, outlen; + size_t i; + int rc; + efx_dword_t flags_and_num_entries; + + WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); + + nic_data->udp_tunnels_dirty = false; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { + efx_device_attach_if_not_resetting(efx); + return 0; + } + + BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + if (nic_data->udp_tunnels[i].count && + nic_data->udp_tunnels[i].port) { + efx_dword_t entry; + + EFX_POPULATE_DWORD_2(entry, + TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, + ntohs(nic_data->udp_tunnels[i].port), + TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, + nic_data->udp_tunnels[i].type); + *_MCDI_ARRAY_DWORD(inbuf, + SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, + num_entries++) = entry; + } + } + + BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != + EFX_WORD_1_LBN); + BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != + EFX_WORD_1_WIDTH); + EFX_POPULATE_DWORD_2(flags_and_num_entries, + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, + !!unloading, + EFX_WORD_1, num_entries); + *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = + flags_and_num_entries; + + inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); + + rc = efx_mcdi_rpc_quiet(efx, 
MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, + inbuf, inlen, outbuf, sizeof(outbuf), &outlen); + if (rc == -EIO) { + /* Most likely the MC rebooted due to another function also + * setting its tunnel port list. Mark the tunnel port list as + * dirty, so it will be pushed upon coming up from the reboot. + */ + nic_data->udp_tunnels_dirty = true; + return 0; + } + + if (rc) { + /* expected not available on unprivileged functions */ + if (rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "Unable to set UDP tunnel ports; rc=%d.\n", rc); + } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & + (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { + netif_info(efx, drv, efx->net_dev, + "Rebooting MC due to UDP tunnel port list change\n"); + will_reset = true; + if (unloading) + /* Delay for the MC reset to complete. This will make + * unloading other functions a bit smoother. This is a + * race, but the other unload will work whichever way + * it goes, this just avoids an unnecessary error + * message. + */ + msleep(100); + } + if (!will_reset && !unloading) { + /* The caller will have detached, relying on the MC reset to + * trigger a re-attach. Since there won't be an MC reset, we + * have to do the attach ourselves. + */ + efx_device_attach_if_not_resetting(efx); + } + + return rc; +} + +static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc = 0; + + mutex_lock(&nic_data->udp_tunnels_lock); + if (nic_data->udp_tunnels_dirty) { + /* Make sure all TX are stopped while we modify the table, else + * we might race against an efx_features_check(). + */ + efx_device_detach_sync(efx); + rc = efx_ef10_set_udp_tnl_ports(efx, false); + } + mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + +static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, + __be16 port) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t i; + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + if (!nic_data->udp_tunnels[i].count) + continue; + if (nic_data->udp_tunnels[i].port == port) + return &nic_data->udp_tunnels[i]; + } + return NULL; +} + +static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, + struct efx_udp_tunnel tnl) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *match; + char typebuf[8]; + size_t i; + int rc; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) + return 0; + + efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", + typebuf, ntohs(tnl.port)); + + mutex_lock(&nic_data->udp_tunnels_lock); + /* Make sure all TX are stopped while we add to the table, else we + * might race against an efx_features_check(). 
+ */ + efx_device_detach_sync(efx); + + match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); + if (match != NULL) { + if (match->type == tnl.type) { + netif_dbg(efx, drv, efx->net_dev, + "Referencing existing tunnel entry\n"); + match->count++; + /* No need to cause an MCDI update */ + rc = 0; + goto unlock_out; + } + efx_get_udp_tunnel_type_name(match->type, + typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, + "UDP port %d is already in use by %s\n", + ntohs(tnl.port), typebuf); + rc = -EEXIST; + goto unlock_out; + } + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) + if (!nic_data->udp_tunnels[i].count) { + nic_data->udp_tunnels[i] = tnl; + nic_data->udp_tunnels[i].count = 1; + rc = efx_ef10_set_udp_tnl_ports(efx, false); + goto unlock_out; + } + + netif_dbg(efx, drv, efx->net_dev, + "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", + typebuf, ntohs(tnl.port)); + + rc = -ENOMEM; + +unlock_out: + mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + +/* Called under the TX lock with the TX queue running, hence no-one can be + * in the middle of updating the UDP tunnels table. However, they could + * have tried and failed the MCDI, in which case they'll have set the dirty + * flag before dropping their locks. + */ +static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) + return false; + + if (nic_data->udp_tunnels_dirty) + /* SW table may not match HW state, so just assume we can't + * use any UDP tunnel offloads. + */ + return false; + + return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; +} + +static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, + struct efx_udp_tunnel tnl) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *match; + char typebuf[8]; + int rc; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) + return 0; + + efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", + typebuf, ntohs(tnl.port)); + + mutex_lock(&nic_data->udp_tunnels_lock); + /* Make sure all TX are stopped while we remove from the table, else we + * might race against an efx_features_check(). 
+ */ + efx_device_detach_sync(efx); + + match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); + if (match != NULL) { + if (match->type == tnl.type) { + if (--match->count) { + /* Port is still in use, so nothing to do */ + netif_dbg(efx, drv, efx->net_dev, + "UDP tunnel port %d remains active\n", + ntohs(tnl.port)); + rc = 0; + goto out_unlock; + } + rc = efx_ef10_set_udp_tnl_ports(efx, false); + goto out_unlock; + } + efx_get_udp_tunnel_type_name(match->type, + typebuf, sizeof(typebuf)); + netif_warn(efx, drv, efx->net_dev, + "UDP port %d is actually in use by %s, not removing\n", + ntohs(tnl.port), typebuf); + } + rc = -ENOENT; + +out_unlock: + mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + #define EF10_OFFLOAD_FEATURES \ (NETIF_F_IP_CSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | \ @@ -5609,6 +6422,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -5647,11 +6461,11 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .vswitching_probe = efx_ef10_vswitching_probe_vf, .vswitching_restore = efx_ef10_vswitching_restore_vf, .vswitching_remove = efx_ef10_vswitching_remove_vf, - .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, #endif .get_mac_address = efx_ef10_get_mac_address_vf, .set_mac_address = efx_ef10_set_mac_address, + .get_phys_port_id = efx_ef10_get_phys_port_id, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, @@ -5659,6 +6473,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, .can_rx_scatter = true, .always_rx_scatter = true, + .min_interrupt_mode = EFX_INT_MODE_MSIX, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, @@ -5666,6 +6481,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; const struct efx_nic_type efx_hunt_a0_nic_type = { @@ -5716,6 +6532,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -5756,6 +6573,10 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, + .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, + .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, + .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, + .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, #ifdef CONFIG_SFC_SRIOV .sriov_configure = efx_ef10_sriov_configure, .sriov_init = efx_ef10_sriov_init, @@ -5776,6 +6597,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .set_mac_address = efx_ef10_set_mac_address, .tso_versions = efx_ef10_tso_versions, + .get_phys_port_id = efx_ef10_get_phys_port_id, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, @@ 
-5783,6 +6605,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, .can_rx_scatter = true, .always_rx_scatter = true, + .option_descriptors = true, + .min_interrupt_mode = EFX_INT_MODE_LEGACY, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, @@ -5790,4 +6614,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index a949b9d27329..b7e4345c990d 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -6,6 +6,7 @@ * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ +#include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/module.h> #include "net_driver.h" @@ -548,13 +549,13 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) vf->efx->type->filter_table_probe(vf->efx); up_write(&vf->efx->filter_sem); efx_net_open(vf->efx->net_dev); - netif_device_attach(vf->efx->net_dev); + efx_device_attach_if_not_resetting(vf->efx); } return 0; fail: - memset(vf->mac, 0, ETH_ALEN); + eth_zero_addr(vf->mac); return rc; } @@ -666,7 +667,7 @@ restore_filters: if (rc2) goto reset_nic; - netif_device_attach(vf->efx->net_dev); + efx_device_attach_if_not_resetting(vf->efx); } return rc; @@ -760,17 +761,3 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, return 0; } - -int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, - struct netdev_phys_item_id *ppid) -{ - struct efx_ef10_nic_data *nic_data = efx->nic_data; - - if (!is_valid_ether_addr(nic_data->port_id)) - return -EOPNOTSUPP; - - ppid->id_len = ETH_ALEN; - memcpy(ppid->id, nic_data->port_id, ppid->id_len); - - return 0; -} diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index 9ceb7ef0a210..2aa444ed42de 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -56,9 +56,6 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, int link_state); -int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, - struct netdev_phys_item_id *ppid); - int efx_ef10_vswitching_probe_pf(struct efx_nic *efx); int efx_ef10_vswitching_probe_vf(struct efx_nic *efx); int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 5a5dcad8c49a..334bcc6df6b2 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -23,12 +23,15 @@ #include <linux/aer.h> #include <linux/interrupt.h> #include "net_driver.h" +#include <net/gre.h> +#include <net/udp_tunnel.h> #include "efx.h" #include "nic.h" #include "selftest.h" #include "sriov.h" #include "mcdi.h" +#include "mcdi_pcol.h" #include "workarounds.h" /************************************************************************** @@ -88,6 +91,21 @@ const char *const efx_reset_type_names[] = { [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", }; +/* UDP tunnel type names */ +static const char *const efx_udp_tunnel_type_names[] = { + [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan", + [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = 
"geneve", +}; + +void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen) +{ + if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) && + efx_udp_tunnel_type_names[type] != NULL) + snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]); + else + snprintf(buf, buflen, "type %d", type); +} + /* Reset workqueue. If any NIC has a hardware failure then a reset will be * queued onto this work queue. This is not a per-nic work queue, because * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. @@ -308,9 +326,6 @@ static int efx_poll(struct napi_struct *napi, int budget) struct efx_nic *efx = channel->efx; int spent; - if (!efx_channel_lock_napi(channel)) - return budget; - netif_vdbg(efx, intr, efx->net_dev, "channel %d NAPI poll executing on CPU %d\n", channel->channel, raw_smp_processor_id()); @@ -331,11 +346,10 @@ static int efx_poll(struct napi_struct *napi, int budget) * since efx_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. */ - napi_complete(napi); - efx_nic_eventq_read_ack(channel); + if (napi_complete_done(napi, spent)) + efx_nic_eventq_read_ack(channel); } - efx_channel_unlock_napi(channel); return spent; } @@ -391,7 +405,6 @@ void efx_start_eventq(struct efx_channel *channel) channel->enabled = true; smp_wmb(); - efx_channel_enable(channel); napi_enable(&channel->napi_str); efx_nic_eventq_read_ack(channel); } @@ -403,8 +416,6 @@ void efx_stop_eventq(struct efx_channel *channel) return; napi_disable(&channel->napi_str); - while (!efx_channel_disable(channel)) - usleep_range(1000, 20000); channel->enabled = false; } @@ -865,7 +876,7 @@ out: efx_schedule_reset(efx, RESET_TYPE_DISABLE); } else { efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; @@ -1409,9 +1420,12 @@ static int efx_probe_interrupts(struct efx_nic *efx) xentries, 1, n_channels); if (rc < 0) { /* Fall back to single channel MSI */ - efx->interrupt_mode = EFX_INT_MODE_MSI; netif_err(efx, drv, efx->net_dev, "could not enable MSI-X\n"); + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI) + efx->interrupt_mode = EFX_INT_MODE_MSI; + else + return rc; } else if (rc < n_channels) { netif_err(efx, drv, efx->net_dev, "WARNING: Insufficient MSI-X vectors" @@ -1454,7 +1468,10 @@ static int efx_probe_interrupts(struct efx_nic *efx) } else { netif_err(efx, drv, efx->net_dev, "could not enable MSI\n"); - efx->interrupt_mode = EFX_INT_MODE_LEGACY; + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY) + efx->interrupt_mode = EFX_INT_MODE_LEGACY; + else + return rc; } } @@ -2088,7 +2105,6 @@ static void efx_init_napi_channel(struct efx_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); - efx_channel_busy_poll_init(channel); } static void efx_init_napi(struct efx_nic *efx) @@ -2138,37 +2154,6 @@ static void efx_netpoll(struct net_device *net_dev) #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -static int efx_busy_poll(struct napi_struct *napi) -{ - struct efx_channel *channel = - container_of(napi, struct efx_channel, napi_str); - struct efx_nic *efx = channel->efx; - int budget = 4; - int old_rx_packets, rx_packets; - - if (!netif_running(efx->net_dev)) - return LL_FLUSH_FAILED; - - if (!efx_channel_try_lock_poll(channel)) - return LL_FLUSH_BUSY; - - old_rx_packets = channel->rx_queue.rx_packets; - efx_process_channel(channel, budget); - - rx_packets = channel->rx_queue.rx_packets - old_rx_packets; - - /* There is no 
race condition with NAPI here. - * NAPI will automatically be rescheduled if it yielded during busy - * polling, because it was not able to take the lock and thus returned - * the full budget. - */ - efx_channel_unlock_poll(channel); - - return rx_packets; -} -#endif - /************************************************************************** * * Kernel net device interface @@ -2197,6 +2182,8 @@ int efx_net_open(struct net_device *net_dev) efx_link_status_changed(efx); efx_start_all(efx); + if (efx->state == STATE_DISABLED || efx->reset_pending) + netif_device_detach(efx->net_dev); efx_selftest_async_start(efx); return 0; } @@ -2219,16 +2206,14 @@ int efx_net_stop(struct net_device *net_dev) } /* Context: process, dev_base_lock or RTNL held, non-blocking. */ -static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) +static void efx_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); - - return stats; } /* Context: netif_tx_lock held, BHs disabled. */ @@ -2265,7 +2250,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) mutex_unlock(&efx->mac_lock); efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return 0; } @@ -2336,6 +2321,27 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return 0; } +static int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_phys_port_id) + return efx->type->get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} + +static int efx_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (snprintf(name, len, "p%u", efx->port_num) >= len) + return -EINVAL; + return 0; +} + static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) { struct efx_nic *efx = netdev_priv(net_dev); @@ -2356,6 +2362,52 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi return -EOPNOTSUPP; } +static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in) +{ + switch (in) { + case UDP_TUNNEL_TYPE_VXLAN: + return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; + case UDP_TUNNEL_TYPE_GENEVE: + return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; + default: + return -1; + } +} + +static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = netdev_priv(dev); + struct efx_udp_tunnel tnl; + int efx_tunnel_type; + + efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); + if (efx_tunnel_type < 0) + return; + + tnl.type = (u16)efx_tunnel_type; + tnl.port = ti->port; + + if (efx->type->udp_tnl_add_port) + (void)efx->type->udp_tnl_add_port(efx, tnl); +} + +static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = netdev_priv(dev); + struct efx_udp_tunnel tnl; + int efx_tunnel_type; + + efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); + if (efx_tunnel_type < 0) + return; + + tnl.type = (u16)efx_tunnel_type; + tnl.port = ti->port; + + if (efx->type->udp_tnl_add_port) + (void)efx->type->udp_tnl_del_port(efx, tnl); +} + static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, @@ -2376,18 +2428,18 @@ static const 
struct net_device_ops efx_netdev_ops = { .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, .ndo_get_vf_config = efx_sriov_get_vf_config, .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, - .ndo_get_phys_port_id = efx_sriov_get_phys_port_id, #endif + .ndo_get_phys_port_id = efx_get_phys_port_id, + .ndo_get_phys_port_name = efx_get_phys_port_name, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, #endif .ndo_setup_tc = efx_setup_tc, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = efx_busy_poll, -#endif #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif + .ndo_udp_tunnel_add = efx_udp_tunnel_add, + .ndo_udp_tunnel_del = efx_udp_tunnel_del, }; static void efx_update_name(struct efx_nic *efx) @@ -2627,6 +2679,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) efx_start_all(efx); + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + return 0; fail: @@ -2691,7 +2746,7 @@ out: efx->state = STATE_DISABLED; } else { netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; } @@ -2888,7 +2943,7 @@ static const struct efx_phy_operations efx_dummy_phy_operations = { static int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, struct net_device *net_dev) { - int i; + int rc = -ENOMEM, i; /* Initialise common structures */ INIT_LIST_HEAD(&efx->node); @@ -2929,8 +2984,15 @@ static int efx_init_struct(struct efx_nic *efx, } /* Higher numbered interrupt modes are less capable! */ + if (WARN_ON_ONCE(efx->type->max_interrupt_mode > + efx->type->min_interrupt_mode)) { + rc = -EIO; + goto fail; + } efx->interrupt_mode = max(efx->type->max_interrupt_mode, interrupt_mode); + efx->interrupt_mode = min(efx->type->min_interrupt_mode, + interrupt_mode); /* Would be good to use the net_dev name, but we're too early */ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", @@ -2943,7 +3005,7 @@ static int efx_init_struct(struct efx_nic *efx, fail: efx_fini_struct(efx); - return -ENOMEM; + return rc; } static void efx_fini_struct(struct efx_nic *efx) @@ -3158,6 +3220,51 @@ static int efx_pci_probe_main(struct efx_nic *efx) return rc; } +static int efx_pci_probe_post_io(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + int rc = efx_pci_probe_main(efx); + + if (rc) + return rc; + + if (efx->type->sriov_init) { + rc = efx->type->sriov_init(efx); + if (rc) + netif_err(efx, probe, efx->net_dev, + "SR-IOV can't be enabled rc %d\n", rc); + } + + /* Determine netdevice features */ + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_RXCSUM); + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) + net_dev->features |= NETIF_F_TSO6; + /* Check whether device supports TSO */ + if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) + net_dev->features &= ~NETIF_F_ALL_TSO; + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + NETIF_F_RXCSUM); + + net_dev->hw_features = net_dev->features & ~efx->fixed_features; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). 
+ */ + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->features |= efx->fixed_features; + + rc = efx_register_netdev(efx); + if (!rc) + return 0; + + efx_pci_remove_main(efx); + return rc; +} + /* NIC initialisation * * This is called at module load (or hotplug insertion, @@ -3200,42 +3307,28 @@ static int efx_pci_probe(struct pci_dev *pci_dev, if (rc) goto fail2; - rc = efx_pci_probe_main(efx); + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On failure, retry once immediately. + * If we aborted probe due to a scheduled reset, dismiss it. + */ + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On another failure, retry once more + * after a 50-305ms delay. + */ + unsigned char r; + + get_random_bytes(&r, 1); + msleep((unsigned int)r + 50); + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + } + } if (rc) goto fail3; - net_dev->features |= (efx->type->offload_features | NETIF_F_SG | - NETIF_F_TSO | NETIF_F_RXCSUM); - if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) - net_dev->features |= NETIF_F_TSO6; - /* Check whether device supports TSO */ - if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) - net_dev->features &= ~NETIF_F_ALL_TSO; - /* Mask for features that also apply to VLAN devices */ - net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | - NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | - NETIF_F_RXCSUM); - - net_dev->hw_features = net_dev->features & ~efx->fixed_features; - - /* Disable VLAN filtering by default. It may be enforced if - * the feature is fixed (i.e. VLAN filters are required to - * receive VLAN tagged packets due to vPort restrictions). - */ - net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; - net_dev->features |= efx->fixed_features; - - rc = efx_register_netdev(efx); - if (rc) - goto fail4; - - if (efx->type->sriov_init) { - rc = efx->type->sriov_init(efx); - if (rc) - netif_err(efx, probe, efx->net_dev, - "SR-IOV can't be enabled rc %d\n", rc); - } - netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); /* Try to create MTDs, but allow this to fail */ @@ -3252,10 +3345,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev, "PCIE error reporting unavailable (%d).\n", rc); + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + return 0; - fail4: - efx_pci_remove_main(efx); fail3: efx_fini_io(efx); fail2: @@ -3325,7 +3419,7 @@ static int efx_pm_thaw(struct device *dev) efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); efx->state = STATE_READY; @@ -3585,3 +3679,4 @@ MODULE_AUTHOR("Solarflare Communications and " MODULE_DESCRIPTION("Solarflare network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table); +MODULE_VERSION(EFX_DRIVER_VERSION); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 342ae16e1f2d..ee14662415c5 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -276,6 +276,12 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) netif_tx_unlock_bh(dev); } +static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx) +{ + if ((efx->state != STATE_DISABLED) && !efx->reset_pending) + netif_device_attach(efx->net_dev); +} + static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) { if (WARN_ON(down_read_trylock(sem))) { diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 18ebaea44e82..3747b5644110 100644 --- 
a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -77,6 +77,11 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events), @@ -1278,15 +1283,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table); } +static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, u8 *hfunc) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (indir) memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + if (key) + memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size); return 0; } @@ -1295,14 +1314,18 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, { struct efx_nic *efx = netdev_priv(net_dev); - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!indir) + if (!indir && !key) return 0; - return efx->type->rx_push_rss_config(efx, true, indir); + if (!key) + key = efx->rx_hash_key; + if (!indir) + indir = efx->rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, indir, key); } static int efx_ethtool_get_ts_info(struct net_device *net_dev, @@ -1377,6 +1400,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxnfc = efx_ethtool_get_rxnfc, .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, + .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, .get_ts_info = efx_ethtool_get_ts_info, diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 5c5cb3c4c12e..f5e5cd1659a1 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -304,9 +304,6 @@ static int ef4_poll(struct napi_struct *napi, int budget) struct ef4_nic *efx = channel->efx; int spent; - if (!ef4_channel_lock_napi(channel)) - return budget; - netif_vdbg(efx, intr, efx->net_dev, "channel %d NAPI poll executing on CPU %d\n", channel->channel, raw_smp_processor_id()); @@ -327,11 +324,10 @@ static int ef4_poll(struct napi_struct *napi, int budget) * since ef4_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. 
*/ - napi_complete(napi); + napi_complete_done(napi, spent); ef4_nic_eventq_read_ack(channel); } - ef4_channel_unlock_napi(channel); return spent; } @@ -387,7 +383,6 @@ void ef4_start_eventq(struct ef4_channel *channel) channel->enabled = true; smp_wmb(); - ef4_channel_enable(channel); napi_enable(&channel->napi_str); ef4_nic_eventq_read_ack(channel); } @@ -399,8 +394,6 @@ void ef4_stop_eventq(struct ef4_channel *channel) return; napi_disable(&channel->napi_str); - while (!ef4_channel_disable(channel)) - usleep_range(1000, 20000); channel->enabled = false; } @@ -986,7 +979,7 @@ void ef4_mac_reconfigure(struct ef4_nic *efx) /* Push loopback/power/transmit disable settings to the PHY, and reconfigure * the MAC appropriately. All other PHY configuration changes are pushed - * through phy_op->set_settings(), and pushed asynchronously to the MAC + * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC * through ef4_monitor(). * * Callers must hold the mac_lock @@ -2029,7 +2022,6 @@ static void ef4_init_napi_channel(struct ef4_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll, napi_weight); - ef4_channel_busy_poll_init(channel); } static void ef4_init_napi(struct ef4_nic *efx) @@ -2079,37 +2071,6 @@ static void ef4_netpoll(struct net_device *net_dev) #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -static int ef4_busy_poll(struct napi_struct *napi) -{ - struct ef4_channel *channel = - container_of(napi, struct ef4_channel, napi_str); - struct ef4_nic *efx = channel->efx; - int budget = 4; - int old_rx_packets, rx_packets; - - if (!netif_running(efx->net_dev)) - return LL_FLUSH_FAILED; - - if (!ef4_channel_try_lock_poll(channel)) - return LL_FLUSH_BUSY; - - old_rx_packets = channel->rx_queue.rx_packets; - ef4_process_channel(channel, budget); - - rx_packets = channel->rx_queue.rx_packets - old_rx_packets; - - /* There is no race condition with NAPI here. - * NAPI will automatically be rescheduled if it yielded during busy - * polling, because it was not able to take the lock and thus returned - * the full budget. - */ - ef4_channel_unlock_poll(channel); - - return rx_packets; -} -#endif - /************************************************************************** * * Kernel net device interface @@ -2158,16 +2119,14 @@ int ef4_net_stop(struct net_device *net_dev) } /* Context: process, dev_base_lock or RTNL held, non-blocking. */ -static struct rtnl_link_stats64 *ef4_net_stats(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) +static void ef4_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) { struct ef4_nic *efx = netdev_priv(net_dev); spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); - - return stats; } /* Context: netif_tx_lock held, BHs disabled. 
*/ @@ -2291,9 +2250,6 @@ static const struct net_device_ops ef4_netdev_ops = { .ndo_poll_controller = ef4_netpoll, #endif .ndo_setup_tc = ef4_setup_tc, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ef4_busy_poll, -#endif #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = ef4_filter_rfs, #endif @@ -3348,3 +3304,4 @@ MODULE_AUTHOR("Solarflare Communications and " MODULE_DESCRIPTION("Solarflare Falcon network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ef4_pci_table); +MODULE_VERSION(EF4_DRIVER_VERSION); diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index 8e1929b01a32..56049157a5af 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -115,44 +115,47 @@ static int ef4_ethtool_phys_id(struct net_device *net_dev, } /* This must be called with rtnl_lock held. */ -static int ef4_ethtool_get_settings(struct net_device *net_dev, - struct ethtool_cmd *ecmd) +static int +ef4_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { struct ef4_nic *efx = netdev_priv(net_dev); struct ef4_link_state *link_state = &efx->link_state; mutex_lock(&efx->mac_lock); - efx->phy_op->get_settings(efx, ecmd); + efx->phy_op->get_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); /* Both MACs support pause frames (bidirectional and respond-only) */ - ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); if (LOOPBACK_INTERNAL(efx)) { - ethtool_cmd_speed_set(ecmd, link_state->speed); - ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.speed = link_state->speed; + cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; } return 0; } /* This must be called with rtnl_lock held. 
*/ -static int ef4_ethtool_set_settings(struct net_device *net_dev, - struct ethtool_cmd *ecmd) +static int +ef4_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { struct ef4_nic *efx = netdev_priv(net_dev); int rc; /* GMAC does not support 1000Mbps HD */ - if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && - (ecmd->duplex != DUPLEX_FULL)) { + if ((cmd->base.speed == SPEED_1000) && + (cmd->base.duplex != DUPLEX_FULL)) { netif_dbg(efx, drv, efx->net_dev, "rejecting unsupported 1000Mbps HD setting\n"); return -EINVAL; } mutex_lock(&efx->mac_lock); - rc = efx->phy_op->set_settings(efx, ecmd); + rc = efx->phy_op->set_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); return rc; } @@ -1310,8 +1313,6 @@ static int ef4_ethtool_get_module_info(struct net_device *net_dev, } const struct ethtool_ops ef4_ethtool_ops = { - .get_settings = ef4_ethtool_get_settings, - .set_settings = ef4_ethtool_set_settings, .get_drvinfo = ef4_ethtool_get_drvinfo, .get_regs_len = ef4_ethtool_get_regs_len, .get_regs = ef4_ethtool_get_regs, @@ -1340,4 +1341,6 @@ const struct ethtool_ops ef4_ethtool_ops = { .set_rxfh = ef4_ethtool_set_rxfh, .get_module_info = ef4_ethtool_get_module_info, .get_module_eeprom = ef4_ethtool_get_module_eeprom, + .get_link_ksettings = ef4_ethtool_get_link_ksettings, + .set_link_ksettings = ef4_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index c6ff0cc5ef18..93c713c1f627 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -16,6 +16,8 @@ #include <linux/i2c.h> #include <linux/mii.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include "net_driver.h" #include "bitfield.h" #include "efx.h" diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.c b/drivers/net/ethernet/sfc/falcon/mdio_10g.c index e7d7c09296aa..ee0713f03d01 100644 --- a/drivers/net/ethernet/sfc/falcon/mdio_10g.c +++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.c @@ -226,33 +226,45 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, } /** - * ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO. + * ef4_mdio_set_link_ksettings - Set (some of) the PHY settings over MDIO. 
* @efx: Efx NIC - * @ecmd: New settings + * @cmd: New settings */ -int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +int ef4_mdio_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd) { - struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET }; - - efx->phy_op->get_settings(efx, &prev); - - if (ecmd->advertising == prev.advertising && - ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) && - ecmd->duplex == prev.duplex && - ecmd->port == prev.port && - ecmd->autoneg == prev.autoneg) + struct ethtool_link_ksettings prev = { + .base.cmd = ETHTOOL_GLINKSETTINGS + }; + u32 prev_advertising, advertising; + u32 prev_supported; + + efx->phy_op->get_link_ksettings(efx, &prev); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32(&prev_advertising, + prev.link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32(&prev_supported, + prev.link_modes.supported); + + if (advertising == prev_advertising && + cmd->base.speed == prev.base.speed && + cmd->base.duplex == prev.base.duplex && + cmd->base.port == prev.base.port && + cmd->base.autoneg == prev.base.autoneg) return 0; /* We can only change these settings for -T PHYs */ - if (prev.port != PORT_TP || ecmd->port != PORT_TP) + if (prev.base.port != PORT_TP || cmd->base.port != PORT_TP) return -EINVAL; /* Check that PHY supports these settings */ - if (!ecmd->autoneg || - (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported) + if (!cmd->base.autoneg || + (advertising | SUPPORTED_Autoneg) & ~prev_supported) return -EINVAL; - ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg); + ef4_link_set_advertising(efx, advertising | ADVERTISED_Autoneg); ef4_mdio_an_reconfigure(efx); return 0; } diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.h b/drivers/net/ethernet/sfc/falcon/mdio_10g.h index 885cf7a834a6..53cb5cc4ad37 100644 --- a/drivers/net/ethernet/sfc/falcon/mdio_10g.h +++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.h @@ -83,7 +83,8 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power, unsigned int mmd_mask); /* Set (some of) the PHY settings over MDIO */ -int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd); +int ef4_mdio_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd); /* Push advertising flags and restart autonegotiation */ void ef4_mdio_an_reconfigure(struct ef4_nic *efx); diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h index 210b28f7d2a1..37a8bdf32206 100644 --- a/drivers/net/ethernet/sfc/falcon/net_driver.h +++ b/drivers/net/ethernet/sfc/falcon/net_driver.h @@ -448,131 +448,6 @@ struct ef4_channel { struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES]; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum ef4_channel_busy_poll_state { - EF4_CHANNEL_STATE_IDLE = 0, - EF4_CHANNEL_STATE_NAPI = BIT(0), - EF4_CHANNEL_STATE_NAPI_REQ_BIT = 1, - EF4_CHANNEL_STATE_NAPI_REQ = BIT(1), - EF4_CHANNEL_STATE_POLL_BIT = 2, - EF4_CHANNEL_STATE_POLL = BIT(2), - EF4_CHANNEL_STATE_DISABLE_BIT = 3, -}; - -static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) -{ - WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); -} - -/* Called from the device poll routine to get ownership of a channel. 
*/ -static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) -{ - unsigned long prev, old = READ_ONCE(channel->busy_poll_state); - - while (1) { - switch (old) { - case EF4_CHANNEL_STATE_POLL: - /* Ensure ef4_channel_try_lock_poll() wont starve us */ - set_bit(EF4_CHANNEL_STATE_NAPI_REQ_BIT, - &channel->busy_poll_state); - /* fallthrough */ - case EF4_CHANNEL_STATE_POLL | EF4_CHANNEL_STATE_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&channel->busy_poll_state, old, - EF4_CHANNEL_STATE_NAPI); - if (unlikely(prev != old)) { - /* This is likely to mean we've just entered polling - * state. Go back round to set the REQ bit. - */ - old = prev; - continue; - } - return true; - } -} - -static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) -{ - /* Make sure write has completed from ef4_channel_lock_napi() */ - smp_wmb(); - WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); -} - -/* Called from ef4_busy_poll(). */ -static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) -{ - return cmpxchg(&channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE, - EF4_CHANNEL_STATE_POLL) == EF4_CHANNEL_STATE_IDLE; -} - -static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) -{ - clear_bit_unlock(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) -{ - return test_bit(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline void ef4_channel_enable(struct ef4_channel *channel) -{ - clear_bit_unlock(EF4_CHANNEL_STATE_DISABLE_BIT, - &channel->busy_poll_state); -} - -/* Stop further polling or napi access. - * Returns false if the channel is currently busy polling. - */ -static inline bool ef4_channel_disable(struct ef4_channel *channel) -{ - set_bit(EF4_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); - /* Implicit barrier in ef4_channel_busy_polling() */ - return !ef4_channel_busy_polling(channel); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) -{ - return true; -} - -static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) -{ - return false; -} - -static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) -{ - return false; -} - -static inline void ef4_channel_enable(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_disable(struct ef4_channel *channel) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * struct ef4_msi_context - Context for each MSI * @efx: The associated NIC @@ -684,8 +559,8 @@ static inline bool ef4_link_state_equal(const struct ef4_link_state *left, * @reconfigure: Reconfigure PHY (e.g. for new link parameters) * @poll: Update @link_state and report whether it changed. * Serialised by the mac_lock. - * @get_settings: Get ethtool settings. Serialised by the mac_lock. - * @set_settings: Set ethtool settings. Serialised by the mac_lock. + * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock. + * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock. 
* @set_npage_adv: Set abilities advertised in (Extended) Next Page * (only needed where AN bit is set in mmds) * @test_alive: Test that PHY is 'alive' (online) @@ -700,10 +575,10 @@ struct ef4_phy_operations { void (*remove) (struct ef4_nic *efx); int (*reconfigure) (struct ef4_nic *efx); bool (*poll) (struct ef4_nic *efx); - void (*get_settings) (struct ef4_nic *efx, - struct ethtool_cmd *ecmd); - int (*set_settings) (struct ef4_nic *efx, - struct ethtool_cmd *ecmd); + void (*get_link_ksettings)(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd); + int (*set_link_ksettings)(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd); void (*set_npage_adv) (struct ef4_nic *efx, u32); int (*test_alive) (struct ef4_nic *efx); const char *(*test_name) (struct ef4_nic *efx, unsigned int index); diff --git a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c index d29331652548..f5e0f18d4ea8 100644 --- a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c +++ b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c @@ -437,9 +437,10 @@ static int qt202x_phy_reconfigure(struct ef4_nic *efx) return 0; } -static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static void qt202x_phy_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { - mdio45_ethtool_gset(&efx->mdio, ecmd); + mdio45_ethtool_ksettings_get(&efx->mdio, cmd); } static void qt202x_phy_remove(struct ef4_nic *efx) @@ -487,8 +488,8 @@ const struct ef4_phy_operations falcon_qt202x_phy_ops = { .poll = qt202x_phy_poll, .fini = ef4_port_dummy_op_void, .remove = qt202x_phy_remove, - .get_settings = qt202x_phy_get_settings, - .set_settings = ef4_mdio_set_settings, + .get_link_ksettings = qt202x_phy_get_link_ksettings, + .set_link_ksettings = ef4_mdio_set_link_ksettings, .test_alive = ef4_mdio_test_alive, .get_module_eeprom = qt202x_phy_get_module_eeprom, .get_module_info = qt202x_phy_get_module_info, diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 250458cbdb4d..6a8406dc0c2b 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -674,8 +674,7 @@ void __ef4_rx_packet(struct ef4_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EF4_RX_PKT_CSUMMED; - if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb && - !ef4_channel_busy_polling(channel)) + if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb) ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); diff --git a/drivers/net/ethernet/sfc/falcon/tenxpress.c b/drivers/net/ethernet/sfc/falcon/tenxpress.c index acc548a1c4d6..ff9b4e2b590c 100644 --- a/drivers/net/ethernet/sfc/falcon/tenxpress.c +++ b/drivers/net/ethernet/sfc/falcon/tenxpress.c @@ -351,9 +351,6 @@ static int tenxpress_phy_reconfigure(struct ef4_nic *efx) return 0; } -static void -tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd); - /* Poll for link state changes */ static bool tenxpress_phy_poll(struct ef4_nic *efx) { @@ -443,7 +440,8 @@ sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags) } static void -tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +tenxpress_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { u32 adv = 0, lpa = 0; int reg; @@ -455,20 +453,22 @@ tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd 
*ecmd) if (reg & MDIO_AN_10GBT_STAT_LP10G) lpa |= ADVERTISED_10000baseT_Full; - mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); + mdio45_ethtool_ksettings_get_npage(&efx->mdio, cmd, adv, lpa); /* In loopback, the PHY automatically brings up the correct interface, * but doesn't advertise the correct speed. So override it */ if (LOOPBACK_EXTERNAL(efx)) - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; } -static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static int +tenxpress_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd) { - if (!ecmd->autoneg) + if (!cmd->base.autoneg) return -EINVAL; - return ef4_mdio_set_settings(efx, ecmd); + return ef4_mdio_set_link_ksettings(efx, cmd); } static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising) @@ -485,8 +485,8 @@ const struct ef4_phy_operations falcon_sfx7101_phy_ops = { .poll = tenxpress_phy_poll, .fini = sfx7101_phy_fini, .remove = tenxpress_phy_remove, - .get_settings = tenxpress_get_settings, - .set_settings = tenxpress_set_settings, + .get_link_ksettings = tenxpress_get_link_ksettings, + .set_link_ksettings = tenxpress_set_link_ksettings, .set_npage_adv = sfx7101_set_npage_adv, .test_alive = ef4_mdio_test_alive, .test_name = sfx7101_test_name, diff --git a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c index 18421f5e880f..3c55fd23c271 100644 --- a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c @@ -540,9 +540,10 @@ static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags) return rc; } -static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static void txc43128_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { - mdio45_ethtool_gset(&efx->mdio, ecmd); + mdio45_ethtool_ksettings_get(&efx->mdio, cmd); } const struct ef4_phy_operations falcon_txc_phy_ops = { @@ -552,8 +553,8 @@ const struct ef4_phy_operations falcon_txc_phy_ops = { .poll = txc43128_phy_poll, .fini = txc43128_phy_fini, .remove = txc43128_phy_remove, - .get_settings = txc43128_get_settings, - .set_settings = ef4_mdio_set_settings, + .get_link_ksettings = txc43128_get_link_ksettings, + .set_link_ksettings = ef4_mdio_set_link_ksettings, .test_alive = ef4_mdio_test_alive, .run_tests = txc43128_run_tests, .test_name = txc43128_test_name, diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index e4ca2161af70..ba45150f53c7 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1649,6 +1649,22 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx) } } +void efx_farch_rx_pull_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + efx_readd(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); + } +} + /* Looks at available SRAM resources and works out how many queues we * can support, and where things like descriptor caches should live. 
* diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index d0ed7f71ea7e..8189a1cd973f 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -27,6 +27,7 @@ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type. * Used for RX default unicast and multicast/broadcast filters. * * Only some combinations are supported, depending on NIC type: @@ -54,6 +55,7 @@ enum efx_filter_match_flags { EFX_FILTER_MATCH_OUTER_VID = 0x0100, EFX_FILTER_MATCH_IP_PROTO = 0x0200, EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, + EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800, }; /** @@ -98,6 +100,26 @@ enum efx_filter_flags { EFX_FILTER_FLAG_TX = 0x10, }; +/** enum efx_encap_type - types of encapsulation + * @EFX_ENCAP_TYPE_NONE: no encapsulation + * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation + * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation + * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation + * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame + * + * Contains both enumerated types and flags. + * To get just the type, OR with @EFX_ENCAP_TYPES_MASK. + */ +enum efx_encap_type { + EFX_ENCAP_TYPE_NONE = 0, + EFX_ENCAP_TYPE_VXLAN = 1, + EFX_ENCAP_TYPE_NVGRE = 2, + EFX_ENCAP_TYPE_GENEVE = 3, + + EFX_ENCAP_TYPES_MASK = 7, + EFX_ENCAP_FLAG_IPV6 = 8, +}; + /** * struct efx_filter_spec - specification for a hardware filter * @match_flags: Match type flags, from &enum efx_filter_match_flags @@ -118,6 +140,8 @@ enum efx_filter_flags { * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set + * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if + * %EFX_FILTER_MATCH_ENCAP_TYPE is set * * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be * used to initialise the structure. 
The efx_filter_set_*() functions @@ -144,7 +168,8 @@ struct efx_filter_spec { __be32 rem_host[4]; __be16 loc_port; __be16 rem_port; - /* total 64 bytes */ + u32 encap_type:4; + /* total 65 bytes */ }; enum { @@ -269,4 +294,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) return 0; } +static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec, + enum efx_encap_type encap_type) +{ + spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->encap_type = encap_type; +} + +static inline enum efx_encap_type efx_filter_get_encap_type( + const struct efx_filter_spec *spec) +{ + if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE) + return spec->encap_type; + return EFX_ENCAP_TYPE_NONE; +} #endif /* EFX_FILTER_H */ diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 995651341b94..b9422450deb8 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -128,7 +128,7 @@ fail: return rc; } -void efx_mcdi_fini(struct efx_nic *efx) +void efx_mcdi_detach(struct efx_nic *efx) { if (!efx->mcdi) return; @@ -137,6 +137,12 @@ void efx_mcdi_fini(struct efx_nic *efx) /* Relinquish the device (back to the BMC, if this is a LOM) */ efx_mcdi_drv_attach(efx, false, NULL); +} + +void efx_mcdi_fini(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; #ifdef CONFIG_SFC_MCDI_LOGGING free_page((unsigned long)efx->mcdi->iface.logging_buffer); @@ -716,8 +722,11 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, if (cmd == MC_CMD_REBOOT && rc == -EIO) { /* Don't reset if MC_CMD_REBOOT returns EIO */ } else if (rc == -EIO || rc == -EINTR) { - netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", - -rc); + netif_err(efx, hw, efx->net_dev, "MC reboot detected\n"); + netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n", + cmd, -rc); + if (efx->type->mcdi_reboot_detected) + efx->type->mcdi_reboot_detected(efx); efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); } else if (proxy_handle && (rc == -EPROTO) && efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, @@ -837,11 +846,9 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, outbuf, outlen, outlen_actual, quiet, NULL, raw_rc); } else { - netif_printk(efx, hw, - rc == -EPERM ? KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x failed after proxy auth rc=%d\n", - cmd, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); if (rc == -EINTR || rc == -EIO) efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); @@ -1084,10 +1091,9 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, code = MCDI_DWORD(outbuf, ERR_CODE); if (outlen >= MC_CMD_ERR_ARG_OFST + 4) err_arg = MCDI_DWORD(outbuf, ERR_ARG); - netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", - cmd, inlen, rc, code, err_arg); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); } /* Switch to polled MCDI completions. This can be called in various @@ -2057,8 +2063,8 @@ fail: /* Older firmware lacks GET_WORKAROUNDS and this isn't especially * terrifying. The call site will have to deal with it though. */ - netif_printk(efx, hw, rc == -ENOSYS ? 
KERN_DEBUG : KERN_ERR, - efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err, + "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 4472107ca8c1..154ef41d1927 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -142,6 +142,7 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) #endif int efx_mcdi_init(struct efx_nic *efx); +void efx_mcdi_detach(struct efx_nic *efx); void efx_mcdi_fini(struct efx_nic *efx); int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 35cc3d4fa5f6..91fb54fd03d9 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -10832,7 +10832,7 @@ /***********************************/ /* MC_CMD_GET_LICENSED_V3_FEATURE_STATES - * Query the state of an one or more licensed features. (Note that the actual + * Query the state of one or more licensed features. (Note that the actual * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE * operation or a reboot of the MC.) Used for V3 licensing (Medford) */ @@ -11913,6 +11913,27 @@ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 +/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4 +/* UDP port (the standard ports are named below but any port may be used) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2 +/* enum: the IANA allocated UDP port for VXLAN */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 +/* enum: the IANA allocated UDP port for Geneve */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16 +/* tunnel encapsulation protocol (only those named below are supported) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 +/* enum: VXLAN */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 +/* enum: Geneve */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 + /***********************************/ /* MC_CMD_RX_BALANCING diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 1c62c1a00fca..c0537ea06c9a 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -208,6 +208,12 @@ struct efx_tx_buffer { * @write_count: Current write pointer * This is the number of buffers that have been added to the * hardware ring. + * @packet_write_count: Completable write pointer + * This is the write pointer of the last packet written. + * Normally this will equal @write_count, but as option descriptors + * don't produce completion events, they won't update this. + * Filled in iff @efx->type->option_descriptors; only used for PIO. + * Thus, this is written and used on EF10, and neither on farch. * @old_read_count: The value of read_count when last checked. * This is here for performance reasons. 
The xmit path will * only get the up-to-date value of read_count if this @@ -255,6 +261,7 @@ struct efx_tx_queue { /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; + unsigned int packet_write_count; unsigned int old_read_count; unsigned int tso_bursts; unsigned int tso_long_headers; @@ -300,6 +307,7 @@ struct efx_rx_buffer { #define EFX_RX_PKT_DISCARD 0x0004 #define EFX_RX_PKT_TCP 0x0040 #define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */ +#define EFX_RX_PKT_CSUM_LEVEL 0x0200 /** * struct efx_rx_page_state - Page-based rx buffer state @@ -462,13 +470,18 @@ struct efx_channel { u32 *rps_flow_id; #endif - unsigned n_rx_tobe_disc; - unsigned n_rx_ip_hdr_chksum_err; - unsigned n_rx_tcp_udp_chksum_err; - unsigned n_rx_mcast_mismatch; - unsigned n_rx_frm_trunc; - unsigned n_rx_overlength; - unsigned n_skbuff_leaks; + unsigned int n_rx_tobe_disc; + unsigned int n_rx_ip_hdr_chksum_err; + unsigned int n_rx_tcp_udp_chksum_err; + unsigned int n_rx_outer_ip_hdr_chksum_err; + unsigned int n_rx_outer_tcp_udp_chksum_err; + unsigned int n_rx_inner_ip_hdr_chksum_err; + unsigned int n_rx_inner_tcp_udp_chksum_err; + unsigned int n_rx_eth_crc_err; + unsigned int n_rx_mcast_mismatch; + unsigned int n_rx_frm_trunc; + unsigned int n_rx_overlength; + unsigned int n_skbuff_leaks; unsigned int n_rx_nodesc_trunc; unsigned int n_rx_merge_events; unsigned int n_rx_merge_packets; @@ -484,131 +497,6 @@ struct efx_channel { u32 sync_timestamp_minor; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum efx_channel_busy_poll_state { - EFX_CHANNEL_STATE_IDLE = 0, - EFX_CHANNEL_STATE_NAPI = BIT(0), - EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1, - EFX_CHANNEL_STATE_NAPI_REQ = BIT(1), - EFX_CHANNEL_STATE_POLL_BIT = 2, - EFX_CHANNEL_STATE_POLL = BIT(2), - EFX_CHANNEL_STATE_DISABLE_BIT = 3, -}; - -static inline void efx_channel_busy_poll_init(struct efx_channel *channel) -{ - WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); -} - -/* Called from the device poll routine to get ownership of a channel. */ -static inline bool efx_channel_lock_napi(struct efx_channel *channel) -{ - unsigned long prev, old = READ_ONCE(channel->busy_poll_state); - - while (1) { - switch (old) { - case EFX_CHANNEL_STATE_POLL: - /* Ensure efx_channel_try_lock_poll() wont starve us */ - set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT, - &channel->busy_poll_state); - /* fallthrough */ - case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&channel->busy_poll_state, old, - EFX_CHANNEL_STATE_NAPI); - if (unlikely(prev != old)) { - /* This is likely to mean we've just entered polling - * state. Go back round to set the REQ bit. - */ - old = prev; - continue; - } - return true; - } -} - -static inline void efx_channel_unlock_napi(struct efx_channel *channel) -{ - /* Make sure write has completed from efx_channel_lock_napi() */ - smp_wmb(); - WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); -} - -/* Called from efx_busy_poll(). 
*/ -static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) -{ - return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE, - EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE; -} - -static inline void efx_channel_unlock_poll(struct efx_channel *channel) -{ - clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline bool efx_channel_busy_polling(struct efx_channel *channel) -{ - return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline void efx_channel_enable(struct efx_channel *channel) -{ - clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT, - &channel->busy_poll_state); -} - -/* Stop further polling or napi access. - * Returns false if the channel is currently busy polling. - */ -static inline bool efx_channel_disable(struct efx_channel *channel) -{ - set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); - /* Implicit barrier in efx_channel_busy_polling() */ - return !efx_channel_busy_polling(channel); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline void efx_channel_busy_poll_init(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_lock_napi(struct efx_channel *channel) -{ - return true; -} - -static inline void efx_channel_unlock_napi(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) -{ - return false; -} - -static inline void efx_channel_unlock_poll(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_busy_polling(struct efx_channel *channel) -{ - return false; -} - -static inline void efx_channel_enable(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_disable(struct efx_channel *channel) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * struct efx_msi_context - Context for each MSI * @efx: The associated NIC @@ -666,6 +554,8 @@ extern const unsigned int efx_reset_type_max; #define RESET_TYPE(type) \ STRING_TABLE_LOOKUP(type, efx_reset_type) +void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen); + enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, @@ -1105,6 +995,15 @@ struct efx_mtd_partition { char name[IFNAMSIZ + 20]; }; +struct efx_udp_tunnel { + u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */ + __be16 port; + /* Count of repeated adds of the same port. Used only inside the list, + * not in request arguments. + */ + u16 count; +}; + /** * struct efx_nic_type - Efx device type definition * @mem_bar: Get the memory BAR @@ -1174,6 +1073,7 @@ struct efx_mtd_partition { * @tx_remove: Free resources for TX queue * @tx_write: Write TX descriptors and doorbell * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC * @rx_probe: Allocate resources for RX queue * @rx_init: Initialise RX queue on the NIC * @rx_remove: Free resources for RX queue @@ -1220,9 +1120,14 @@ struct efx_mtd_partition { * @ptp_set_ts_config: Set hardware timestamp configuration. The flags * and tx_type will already have been validated but this operation * must validate and update rx_filter. + * @get_phys_port_id: Get the underlying physical port id. * @set_mac_address: Set the MAC address of the device * @tso_versions: Returns mask of firmware-assisted TSO versions supported. * If %NULL, then device does not support any TSO version. 
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required. + * @udp_tnl_add_port: Add a UDP tunnel port + * @udp_tnl_has_port: Check if a port has been added as UDP tunnel + * @udp_tnl_del_port: Remove a UDP tunnel port * @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1236,8 +1141,11 @@ struct efx_mtd_partition { * @rx_buffer_padding: Size of padding at end of RX packet * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers + * @option_descriptors: NIC supports TX option descriptors + * @min_interrupt_mode: Lowest capability interrupt mode supported + * from &enum efx_int_mode. * @max_interrupt_mode: Highest capability interrupt mode supported - * from &enum efx_init_mode. + * from &enum efx_int_mode. * @timer_period_max: Maximum period of interrupt timer (in ticks) * @offload_features: net_device feature flags for protocol offload * features implemented in hardware @@ -1302,7 +1210,8 @@ struct efx_nic_type { unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); int (*rx_push_rss_config)(struct efx_nic *efx, bool user, - const u32 *rx_indir_table); + const u32 *rx_indir_table, const u8 *key); + int (*rx_pull_rss_config)(struct efx_nic *efx); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); @@ -1358,6 +1267,8 @@ struct efx_nic_type { int (*sriov_configure)(struct efx_nic *efx, int num_vfs); int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); @@ -1372,14 +1283,16 @@ struct efx_nic_type { struct ifla_vf_info *ivi); int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, int link_state); - int (*sriov_get_phys_port_id)(struct efx_nic *efx, - struct netdev_phys_item_id *ppid); int (*vswitching_probe)(struct efx_nic *efx); int (*vswitching_restore)(struct efx_nic *efx); void (*vswitching_remove)(struct efx_nic *efx); int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr); int (*set_mac_address)(struct efx_nic *efx); u32 (*tso_versions)(struct efx_nic *efx); + int (*udp_tnl_push_ports)(struct efx_nic *efx); + int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); + bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); + int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); int revision; unsigned int txd_ptr_tbl_base; @@ -1394,12 +1307,15 @@ struct efx_nic_type { unsigned int rx_buffer_padding; bool can_rx_scatter; bool always_rx_scatter; + bool option_descriptors; + unsigned int min_interrupt_mode; unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; int mcdi_max_ver; unsigned int max_rx_ip_filters; u32 hwtstamp_filters; + unsigned int rx_hash_key_size; }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 223774635cba..7b916aa21bde 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -85,6 +85,17 @@ static 
inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; } +/* Report whether the NIC considers this TX queue empty, using + * packet_write_count (the write count recorded for the last completable + * doorbell push). May return false negative. EF10 only, which is OK + * because only EF10 supports PIO. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +{ + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); + return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count); +} + /* Decide whether we can use TX PIO, ie. write packet data directly into * a buffer on the device. This can reduce latency at the expense of * throughput, so we only do this if both hardware and software TX rings @@ -94,9 +105,9 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) { struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); - return tx_queue->piobuf && - __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) && - __efx_nic_tx_is_empty(partner, partner->insert_count); + + return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) && + efx_nic_tx_is_empty(partner); } /* Decide whether to push a TX descriptor to the NIC vs merely writing @@ -332,6 +343,7 @@ enum { * @pio_write_base: Base address for writing PIO buffers * @pio_write_vi_base: Relative VI number for @pio_write_base * @piobuf_handle: Handle of each PIO buffer allocated + * @piobuf_size: size of a single PIO buffer * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC * reboot * @rx_rss_context: Firmware handle for our RSS context @@ -357,6 +369,10 @@ enum { * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock. * @vlan_lock: Lock to serialize access to vlan_list. + * @udp_tunnels: UDP tunnel port numbers and types. + * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing + * @udp_tunnels to hardware and thus the push must be re-done. + * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty. 
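The @udp_tunnels table documented here is what the new ->udp_tnl_add_port/->udp_tnl_has_port/->udp_tnl_del_port hooks operate on. As an illustration only (the real EF10 implementation lives in ef10.c, takes datapath capabilities into account and differs in detail), a lookup over such a table could be sketched as:

/* Illustrative sketch only: scan a driver-private udp_tunnels[] table
 * (fields as documented above: type, port, count) for a given UDP port.
 * Unused slots are assumed to have a zero add-count.
 */
static bool example_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); i++)
		if (nic_data->udp_tunnels[i].count &&
		    nic_data->udp_tunnels[i].port == port)
			return true;
	return false;
}
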
*/ struct efx_ef10_nic_data { struct efx_buffer mcdi_buf; @@ -369,6 +385,7 @@ struct efx_ef10_nic_data { void __iomem *wc_membase, *pio_write_base; unsigned int pio_write_vi_base; unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; + u16 piobuf_size; bool must_restore_piobufs; u32 rx_rss_context; bool rx_rss_context_exclusive; @@ -392,6 +409,9 @@ struct efx_ef10_nic_data { u8 vport_mac[ETH_ALEN]; struct list_head vlan_list; struct mutex vlan_lock; + struct efx_udp_tunnel udp_tunnels[16]; + bool udp_tunnels_dirty; + struct mutex udp_tunnels_lock; }; int efx_init_sriov(void); @@ -613,6 +633,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); void efx_farch_init_common(struct efx_nic *efx); void efx_ef10_handle_drain_event(struct efx_nic *efx); void efx_farch_rx_push_indir_table(struct efx_nic *efx); +void efx_farch_rx_pull_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags); diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 5f4ad4f3518f..42443f434569 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -434,6 +434,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, PKT_HASH_TYPE_L3); skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); for (;;) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, @@ -621,8 +622,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, /* Set the SKB flags */ skb_checksum_none_assert(skb); - if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) + if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) { skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); + } efx_rx_skb_attach_timestamp(channel, skb); @@ -665,8 +668,7 @@ void __efx_rx_packet(struct efx_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb && - !efx_channel_busy_polling(channel)) + if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index cd38b44ae23a..dab286a337a6 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -768,7 +768,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, __efx_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return rc_test; } diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 4e54e5dc9fcb..a617f657eae3 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -326,18 +326,40 @@ fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: + efx_mcdi_detach(efx); efx_mcdi_fini(efx); fail1: kfree(efx->nic_data); return rc; } +static int siena_rx_pull_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the + * first 128 bits of the same key, assuming it's been set by + * siena_rx_push_rss_config, below) + */ + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + 
memcpy(efx->rx_hash_key, &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp, + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_farch_rx_pull_indir_table(efx); + return 0; +} + static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { efx_oword_t temp; /* Set hash key for IPv4 */ + if (key) + memcpy(efx->rx_hash_key, key, sizeof(temp)); memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); @@ -402,7 +424,7 @@ static int siena_init_nic(struct efx_nic *efx) EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); - siena_rx_push_rss_config(efx, false, efx->rx_indir_table); + siena_rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = true; /* Enable event logging */ @@ -429,6 +451,7 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_reset(efx, RESET_TYPE_ALL); + efx_mcdi_detach(efx); efx_mcdi_fini(efx); /* Tear down the private nic state */ @@ -979,6 +1002,7 @@ const struct efx_nic_type siena_a0_nic_type = { .tx_write = efx_farch_tx_write, .tx_limit_len = efx_farch_tx_limit_len, .rx_push_rss_config = siena_rx_push_rss_config, + .rx_pull_rss_config = siena_rx_pull_rss_config, .rx_probe = efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, @@ -1044,6 +1068,8 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, .rx_buffer_padding = 0, .can_rx_scatter = true, + .option_descriptors = false, + .min_interrupt_mode = EFX_INT_MODE_LEGACY, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1053,4 +1079,5 @@ const struct efx_nic_type siena_a0_nic_type = { .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), + .rx_hash_key_size = 16, }; diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c index 9abcf4aded30..0b766fdbcddb 100644 --- a/drivers/net/ethernet/sfc/sriov.c +++ b/drivers/net/ethernet/sfc/sriov.c @@ -73,14 +73,3 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, else return -EOPNOTSUPP; } - -int efx_sriov_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_get_phys_port_id) - return efx->type->sriov_get_phys_port_id(efx, ppid); - else - return -EOPNOTSUPP; -} diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h index ba1762e7f216..84c7984edcaf 100644 --- a/drivers/net/ethernet/sfc/sriov.h +++ b/drivers/net/ethernet/sfc/sriov.h @@ -23,9 +23,6 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, struct ifla_vf_info *ivi); int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, int link_state); -int efx_sriov_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid); - #endif /* CONFIG_SFC_SRIOV */ #endif /* EFX_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 3c0151424d12..ff88d60aa6d5 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -28,7 +28,6 @@ #ifdef EFX_USE_PIO -#define EFX_PIOBUF_SIZE_MAX 
ER_DZ_TX_PIOBUF_SIZE #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; @@ -817,6 +816,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->insert_count = 0; tx_queue->write_count = 0; + tx_queue->packet_write_count = 0; tx_queue->old_write_count = 0; tx_queue->read_count = 0; tx_queue->old_read_count = 0; diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index d390b9663dc3..57e6cef81ebe 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -914,7 +914,7 @@ static void ioc3_alloc_rings(struct net_device *dev) skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (!skb) { - show_free_areas(0); + show_free_areas(0, NULL); continue; } diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 69d2d30e5ef1..ea55abd62ec7 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -854,7 +854,7 @@ static int meth_probe(struct platform_device *pdev) return 0; } -static int __exit meth_remove(struct platform_device *pdev) +static int meth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -866,7 +866,7 @@ static int __exit meth_remove(struct platform_device *pdev) static struct platform_driver meth_driver = { .probe = meth_probe, - .remove = __exit_p(meth_remove), + .remove = meth_remove, .driver = { .name = "meth", } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 19a458716f1a..1b6f6171d078 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -176,7 +176,7 @@ struct sis900_private { u32 msg_enable; - unsigned int cur_rx, dirty_rx; /* producer/comsumer pointers for Tx/Rx ring */ + unsigned int cur_rx, dirty_rx; /* producer/consumer pointers for Tx/Rx ring */ unsigned int cur_tx, dirty_tx; /* The saved address of a sent/receive-in-place packet buffer */ diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 55a95e1d69d6..5f2737189c72 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -264,7 +264,6 @@ struct epic_private { spinlock_t lock; /* Group with Tx control cache line. */ spinlock_t napi_lock; struct napi_struct napi; - unsigned int reschedule_in_poll; unsigned int cur_tx, dirty_tx; unsigned int cur_rx, dirty_rx; @@ -400,7 +399,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&ep->lock); spin_lock_init(&ep->napi_lock); - ep->reschedule_in_poll = 0; /* Bring the chip out of low-power mode. 
*/ ew32(GENCTL, 0x4200); @@ -1086,13 +1084,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) handled = 1; - if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { + if (status & EpicNapiEvent) { spin_lock(&ep->napi_lock); if (napi_schedule_prep(&ep->napi)) { epic_napi_irq_off(dev, ep); __napi_schedule(&ep->napi); - } else - ep->reschedule_in_poll++; + } spin_unlock(&ep->napi_lock); } status &= ~EpicNapiEvent; @@ -1248,37 +1245,23 @@ static int epic_poll(struct napi_struct *napi, int budget) { struct epic_private *ep = container_of(napi, struct epic_private, napi); struct net_device *dev = ep->mii.dev; - int work_done = 0; void __iomem *ioaddr = ep->ioaddr; - -rx_action: + int work_done; epic_tx(dev, ep); - work_done += epic_rx(dev, budget); + work_done = epic_rx(dev, budget); epic_rx_err(dev, ep); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - int more; - - /* A bit baroque but it avoids a (space hungry) spin_unlock */ spin_lock_irqsave(&ep->napi_lock, flags); - more = ep->reschedule_in_poll; - if (!more) { - __napi_complete(napi); - ew32(INTSTAT, EpicNapiEvent); - epic_napi_irq_on(dev, ep); - } else - ep->reschedule_in_poll--; - + ew32(INTSTAT, EpicNapiEvent); + epic_napi_irq_on(dev, ep); spin_unlock_irqrestore(&ep->napi_lock, flags); - - if (more) - goto rx_action; } return work_done; diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 67154621abcf..97280daba27f 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -113,6 +113,7 @@ struct smc_private { struct mii_if_info mii_if; int duplex; int rx_ovrn; + unsigned long last_rx; }; /* Special definitions for Megahertz multifunction cards */ @@ -1491,6 +1492,7 @@ static void smc_rx(struct net_device *dev) if (!(rx_status & RS_ERRORS)) { /* do stuff to make a new packet */ struct sk_buff *skb; + struct smc_private *smc = netdev_priv(dev); /* Note: packet_length adds 5 or 6 extra bytes here! */ skb = netdev_alloc_skb(dev, packet_length+2); @@ -1509,7 +1511,7 @@ static void smc_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; + smc->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += packet_length; if (rx_status & RS_MULTICAST) @@ -1790,7 +1792,7 @@ static void media_check(u_long arg) } /* Ignore collisions unless we've had no rx's recently */ - if (time_after(jiffies, dev->last_rx + HZ)) { + if (time_after(jiffies, smc->last_rx + HZ)) { if (smc->tx_err || (smc->media_status & EPH_16COL)) media |= EPH_16COL; } diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 3174aebb322f..2fa3c1d03abc 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -861,7 +861,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget) smsc9420_pci_flush_write(pd); if (work_done < budget) { - napi_complete(&pd->napi); + napi_complete_done(&pd->napi, work_done); /* re-enable RX DMA interrupts */ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig index 1c1157d2bd40..ecd7a5edef5d 100644 --- a/drivers/net/ethernet/stmicro/Kconfig +++ b/drivers/net/ethernet/stmicro/Kconfig @@ -7,7 +7,8 @@ config NET_VENDOR_STMICRO default y depends on HAS_IOMEM ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. 
+ If you have a network (Ethernet) card based on Synopsys Ethernet IP + Cores, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index ab66248a4b78..cfbe3634dfa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,5 +1,5 @@ config STMMAC_ETH - tristate "STMicroelectronics 10/100/1000 Ethernet driver" + tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver" depends on HAS_IOMEM && HAS_DMA select MII select PHYLIB @@ -7,9 +7,8 @@ config STMMAC_ETH imply PTP_1588_CLOCK select RESET_CONTROLLER ---help--- - This is the driver for the Ethernet IPs are built around a - Synopsys IP Core and only tested on the STMicroelectronics - platforms. + This is the driver for the Ethernet IPs built around a + Synopsys IP Core. if STMMAC_ETH @@ -29,6 +28,15 @@ config STMMAC_PLATFORM if STMMAC_PLATFORM +config DWMAC_DWC_QOS_ETH + tristate "Support for snps,dwc-qos-ethernet.txt DT binding." + select PHYLIB + select CRC32 + select MII + depends on OF && HAS_DMA + help + Support for chips using the snps,dwc-qos-ethernet.txt DT binding. + config DWMAC_GENERIC tristate "Generic driver for DWMAC" default STMMAC_PLATFORM @@ -143,11 +151,11 @@ config STMMAC_PCI tristate "STMMAC PCI bus support" depends on STMMAC_ETH && PCI ---help--- - This is to select the Synopsys DWMAC available on PCI devices, - if you have a controller with this interface, say Y or M here. + This selects the platform specific bus support for the stmmac driver. + This driver was tested on XLINX XC2V3000 FF1152AMT0221 + D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit. - This PCI support is tested on XLINX XC2V3000 FF1152AMT0221 - D1215994A VIRTEX FPGA board. + If you have a controller with this interface, say Y or M here. If unsure, say N. endif diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 8f83a86ba13c..700c60336674 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o +obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o stmmac-platform-objs:= stmmac_platform.o dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 026e8e9cb942..01a8c020d6db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
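A recurring change in this series (seen above in epic100 and smsc9420) is the move from napi_complete() to napi_complete_done(), which reports how much work the poll actually did and whose boolean return value gates interrupt re-enabling. A minimal sketch of the resulting poll pattern, where my_priv, my_process_rx() and my_enable_interrupts() are hypothetical driver-specific pieces:

/* Sketch of the converted NAPI poll pattern; my_priv is assumed to embed
 * a struct napi_struct named napi, and the my_*() helpers stand in for
 * the driver's RX processing and interrupt unmasking.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_process_rx(priv, budget);

	/* Re-arm interrupts only when under budget and the core agrees
	 * polling should stop; napi_complete_done() returns false when the
	 * instance must stay scheduled, e.g. while being busy-polled.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_interrupts(priv);

	return work_done;
}
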
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b13a144f72ad..04d9245b7149 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -71,7 +67,7 @@ struct stmmac_extra_stats { unsigned long overflow_error; unsigned long ipc_csum_error; unsigned long rx_collision; - unsigned long rx_crc; + unsigned long rx_crc_errors; unsigned long dribbling_bit; unsigned long rx_length; unsigned long rx_mii; @@ -323,6 +319,9 @@ struct dma_features { /* TX and RX number of channels */ unsigned int number_rx_channel; unsigned int number_tx_channel; + /* TX and RX number of queues */ + unsigned int number_rx_queues; + unsigned int number_tx_queues; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; }; @@ -340,7 +339,7 @@ struct dma_features { /* Common MAC defines */ #define MAC_CTRL_REG 0x00000000 /* MAC Control */ #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ -#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ +#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ /* Default LPI timers */ #define STMMAC_DEFAULT_LIT_LS 0x3E8 @@ -417,7 +416,7 @@ struct stmmac_dma_ops { /* Configure the AXI Bus Mode Register */ void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); /* Dump DMA registers */ - void (*dump_regs) (void __iomem *ioaddr); + void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); /* Set tx/rx threshold in the csr6 register * An invalid value enables the store-and-forward mode */ void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, @@ -454,8 +453,10 @@ struct stmmac_ops { void (*core_init)(struct mac_device_info *hw, int mtu); /* Enable and verify that the IPC module is supported */ int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ + void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); /* Dump MAC registers */ - void (*dump_regs)(struct mac_device_info *hw); + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); /* Handle extra events on specific interrupts hw dependent */ int (*host_irq_status)(struct mac_device_info *hw, struct stmmac_extra_stats *x); @@ -471,7 +472,8 @@ struct stmmac_ops { unsigned int reg_n); void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, unsigned int reg_n); - void (*set_eee_mode)(struct mac_device_info *hw); + void (*set_eee_mode)(struct mac_device_info *hw, + bool en_tx_lpi_clockgating); void (*reset_eee_mode)(struct mac_device_info *hw); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index faeeef75d7f1..0c2432b1ce67 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -11,10 +11,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 1d181e205d6e..ca9d7e48034c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -17,10 +17,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c new file mode 100644 index 000000000000..1a3fa3d9f855 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -0,0 +1,202 @@ +/* + * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver + * + * Copyright (C) 2016 Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/stmmac.h> + +#include "stmmac_platform.h" + +static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat) +{ + struct device_node *np = pdev->dev.of_node; + u32 burst_map = 0; + u32 bit_index = 0; + u32 a_index = 0; + + if (!plat_dat->axi) { + plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL); + + if (!plat_dat->axi) + return -ENOMEM; + } + + plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi"); + if (of_property_read_u32(np, "snps,write-requests", + &plat_dat->axi->axi_wr_osr_lmt)) { + /** + * Since the register has a reset value of 1, if property + * is missing, default to 1. + */ + plat_dat->axi->axi_wr_osr_lmt = 1; + } else { + /** + * If property exists, to keep the behavior from dwc_eth_qos, + * subtract one after parsing. + */ + plat_dat->axi->axi_wr_osr_lmt--; + } + + if (of_property_read_u32(np, "read,read-requests", + &plat_dat->axi->axi_rd_osr_lmt)) { + /** + * Since the register has a reset value of 1, if property + * is missing, default to 1. + */ + plat_dat->axi->axi_rd_osr_lmt = 1; + } else { + /** + * If property exists, to keep the behavior from dwc_eth_qos, + * subtract one after parsing. 
+ */ + plat_dat->axi->axi_rd_osr_lmt--; + } + of_property_read_u32(np, "snps,burst-map", &burst_map); + + /* converts burst-map bitmask to burst array */ + for (bit_index = 0; bit_index < 7; bit_index++) { + if (burst_map & (1 << bit_index)) { + switch (bit_index) { + case 0: + plat_dat->axi->axi_blen[a_index] = 4; break; + case 1: + plat_dat->axi->axi_blen[a_index] = 8; break; + case 2: + plat_dat->axi->axi_blen[a_index] = 16; break; + case 3: + plat_dat->axi->axi_blen[a_index] = 32; break; + case 4: + plat_dat->axi->axi_blen[a_index] = 64; break; + case 5: + plat_dat->axi->axi_blen[a_index] = 128; break; + case 6: + plat_dat->axi->axi_blen[a_index] = 256; break; + default: + break; + } + a_index++; + } + } + + /* dwc-qos needs GMAC4, AAL, TSO and PMT */ + plat_dat->has_gmac4 = 1; + plat_dat->dma_cfg->aal = 1; + plat_dat->tso_en = 1; + plat_dat->pmt = 1; + + return 0; +} + +static int dwc_eth_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct resource *res; + int ret; + + memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); + + /** + * Since stmmac_platform supports name IRQ only, basic platform + * resource initialization is done in the glue logic. + */ + stmmac_res.irq = platform_get_irq(pdev, 0); + if (stmmac_res.irq < 0) { + if (stmmac_res.irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "IRQ configuration information not found\n"); + + return stmmac_res.irq; + } + stmmac_res.wol_irq = stmmac_res.irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(stmmac_res.addr)) + return PTR_ERR(stmmac_res.addr); + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(plat_dat->stmmac_clk)) { + dev_err(&pdev->dev, "apb_pclk clock not found.\n"); + ret = PTR_ERR(plat_dat->stmmac_clk); + plat_dat->stmmac_clk = NULL; + goto err_remove_config_dt; + } + clk_prepare_enable(plat_dat->stmmac_clk); + + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); + if (IS_ERR(plat_dat->pclk)) { + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); + ret = PTR_ERR(plat_dat->pclk); + plat_dat->pclk = NULL; + goto err_out_clk_dis_phy; + } + clk_prepare_enable(plat_dat->pclk); + + ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); + if (ret) + goto err_out_clk_dis_aper; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_out_clk_dis_aper; + + return 0; + +err_out_clk_dis_aper: + clk_disable_unprepare(plat_dat->pclk); +err_out_clk_dis_phy: + clk_disable_unprepare(plat_dat->stmmac_clk); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; +} + +static int dwc_eth_dwmac_remove(struct platform_device *pdev) +{ + return stmmac_pltfr_remove(pdev); +} + +static const struct of_device_id dwc_eth_dwmac_match[] = { + { .compatible = "snps,dwc-qos-ethernet-4.10", }, + { } +}; +MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); + +static struct platform_driver dwc_eth_dwmac_driver = { + .probe = dwc_eth_dwmac_probe, + .remove = dwc_eth_dwmac_remove, + .driver = { + .name = "dwc-eth-dwmac", + .of_match_table = dwc_eth_dwmac_match, + }, +}; +module_platform_driver(dwc_eth_dwmac_driver); + +MODULE_AUTHOR("Joao Pinto <jpinto@synopsys.com>"); +MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index ffaed1f35efe..9685555932ea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -35,10 +35,6 @@ #define PRG_ETH0_TXDLY_SHIFT 5 #define PRG_ETH0_TXDLY_MASK GENMASK(6, 5) -#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT) /* divider for the result of m250_sel */ #define PRG_ETH0_CLK_M250_DIV_SHIFT 7 @@ -69,6 +65,8 @@ struct meson8b_dwmac { struct clk_divider m25_div; struct clk *m25_div_clk; + + u32 tx_delay_ns; }; static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, @@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; unsigned long clk_rate; + u8 tx_dly_val = 0; switch (dwmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: + /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where + * 8ns are exactly one cycle of the 125MHz RGMII TX clock): + * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3 + */ + tx_dly_val = dwmac->tx_delay_ns >> 1; + /* fall through */ + + case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: /* Generate a 25MHz clock for the PHY */ clk_rate = 25 * 1000 * 1000; @@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); - /* TX clock delay - all known boards use a 1/4 cycle delay */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, - PRG_ETH0_TXDLY_QUARTER); + tx_dly_val << PRG_ETH0_TXDLY_SHIFT); break; case PHY_INTERFACE_MODE_RMII: @@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + /* use 2ns as fallback since this value was previously hardcoded */ + if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns", + &dwmac->tx_delay_ns)) + dwmac->tx_delay_ns = 2; + ret = meson8b_init_clk(dwmac); if (ret) goto err_remove_config_dt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index fa6e9704c077..e5db6ac36235 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -302,6 +302,122 @@ static const struct rk_gmac_ops rk3288_ops = { .set_rmii_speed = rk3288_set_rmii_speed, }; +#define RK3328_GRF_MAC_CON0 0x0900 +#define RK3328_GRF_MAC_CON1 0x0904 + +/* RK3328_GRF_MAC_CON0 */ +#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +/* RK3328_GRF_MAC_CON1 */ +#define RK3328_GMAC_PHY_INTF_SEL_RGMII \ + (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) +#define RK3328_GMAC_PHY_INTF_SEL_RMII \ + (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3) +#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RK3328_GMAC_SPEED_100M GRF_BIT(2) +#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) +#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12)) +#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12)) +#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12)) 
+#define RK3328_GMAC_RMII_MODE GRF_BIT(9) +#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9) +#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0) +#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) +#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) +#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0) + +static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_PHY_INTF_SEL_RGMII | + RK3328_GMAC_RMII_MODE_CLR | + RK3328_GMAC_RXCLK_DLY_ENABLE | + RK3328_GMAC_TXCLK_DLY_ENABLE); + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0, + RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3328_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_PHY_INTF_SEL_RMII | + RK3328_GMAC_RMII_MODE); + + /* set MAC to RMII mode */ + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11)); +} + +static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_RMII_CLK_2_5M | + RK3328_GMAC_SPEED_10M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_RMII_CLK_25M | + RK3328_GMAC_SPEED_100M); + else + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); +} + +static const struct rk_gmac_ops rk3328_ops = { + .set_to_rgmii = rk3328_set_to_rgmii, + .set_to_rmii = rk3328_set_to_rmii, + .set_rgmii_speed = rk3328_set_rgmii_speed, + .set_rmii_speed = rk3328_set_rmii_speed, +}; + #define RK3366_GRF_SOC_CON6 0x0418 #define RK3366_GRF_SOC_CON7 0x041c @@ -1006,6 +1122,7 @@ static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, + { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 1f997027ae51..17d4bbaeb65c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -341,7 +341,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) * mode. Create a copy of the core reset handle so it can be used by * the driver later. */ - dwmac->stmmac_rst = stpriv->stmmac_rst; + dwmac->stmmac_rst = stpriv->plat->stmmac_rst; ret = socfpga_dwmac_set_phy_mode(dwmac); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 1657acfa70c2..e14984814041 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 52b9407a8a39..c02d36629c52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -10,10 +10,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 5484fd726d5a..19b9b3087099 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
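The rk3328 callbacks above program the Rockchip GRF (general register file) with regmap writes whose upper 16 bits act as a write-enable mask for the lower 16 bits, so a single 32-bit write can touch individual bits without a read-modify-write. A sketch of that encoding; the macro bodies mirror the usual dwmac-rk.c definitions (they live earlier in the file, outside this hunk) and the demo delay values are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)				(1U << (nr))
#define HIWORD_UPDATE(val, mask, shift)	\
	(((val) << (shift)) | ((mask) << ((shift) + 16)))
#define GRF_BIT(nr)			(BIT(nr) | BIT((nr) + 16))
#define GRF_CLR_BIT(nr)			BIT((nr) + 16)

int main(void)
{
	/* both delay fields of MAC_CON0 updated by one write:
	 * RX delay field (bits 13:7) = 0x08, TX delay field (bits 6:0) = 0x10
	 */
	uint32_t con0 = HIWORD_UPDATE(0x08, 0x7F, 7) |
			HIWORD_UPDATE(0x10, 0x7F, 0);

	/* set bit 4, clear bits 5 and 6 - the RGMII interface select */
	uint32_t con1 = GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6);

	printf("MAC_CON0 = 0x%08x, MAC_CON1 = 0x%08x\n", con0, con1);
	return 0;
}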
@@ -96,17 +92,13 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) return !!(value & GMAC_CONTROL_IPC); } -static void dwmac1000_dump_regs(struct mac_device_info *hw) +static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space) { void __iomem *ioaddr = hw->pcsr; int i; - pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr); - for (i = 0; i < 55; i++) { - int offset = i * 4; - pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, - offset, readl(ioaddr + offset)); - } + for (i = 0; i < 55; i++) + reg_space[i] = readl(ioaddr + i * 4); } static void dwmac1000_set_umac_addr(struct mac_device_info *hw, @@ -347,11 +339,14 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, return ret; } -static void dwmac1000_set_eee_mode(struct mac_device_info *hw) +static void dwmac1000_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) { void __iomem *ioaddr = hw->pcsr; u32 value; + /*TODO - en_tx_lpi_clockgating treatment */ + /* Enable the link status receive on RGMII, SGMII ore SMII * receive path and instruct the transmit to enter in LPI * state. diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 612d3aaac9a4..d3654a447046 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -205,18 +201,14 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, writel(csr6, ioaddr + DMA_CONTROL); } -static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) +static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - pr_info(" DMA registers\n"); - for (i = 0; i < 22; i++) { - if ((i < 9) || (i > 17)) { - int offset = i * 4; - pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i, - (DMA_BUS_MODE + offset), - readl(ioaddr + DMA_BUS_MODE + offset)); - } - } + + for (i = 0; i < 22; i++) + if ((i < 9) || (i > 17)) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); } static void dwmac1000_get_hw_feature(void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 9dd2987e284d..e370ccec6176 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -18,10 +18,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
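The dump_regs conversion above (and the dwmac100/dwmac4 ones that follow) stops printing registers to the kernel log and instead fills a caller-provided reg_space[] buffer indexed by byte offset / 4, which is what the reworked ethtool get_regs path consumes. A minimal sketch of that indexing convention; the readl() stand-in, array sizes and offsets are illustrative:

#include <stdint.h>

/* stand-in for the kernel readl(); real code reads MMIO registers */
static uint32_t readl(const volatile uint32_t *addr)
{
	return *addr;
}

/* copy 'count' consecutive 32-bit registers starting at byte offset
 * 'first_off' into the reg_space[] slots matching their offsets,
 * i.e. reg_space[offset / 4]
 */
static void dump_block(const volatile uint32_t *ioaddr, uint32_t *reg_space,
		       uint32_t first_off, uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		reg_space[first_off / 4 + i] = readl(ioaddr + first_off / 4 + i);
}

int main(void)
{
	static volatile uint32_t mmio[0x1100 / 4];	/* fake register file */
	uint32_t reg_space[0x1100 / 4] = { 0 };

	dump_block(mmio, reg_space, 0x0000, 55);	/* MAC block at offset 0 */
	dump_block(mmio, reg_space, 0x1000, 22);	/* DMA block at DMA_BUS_MODE */
	return 0;
}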
@@ -44,28 +40,18 @@ static void dwmac100_core_init(struct mac_device_info *hw, int mtu) #endif } -static void dwmac100_dump_mac_regs(struct mac_device_info *hw) +static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space) { void __iomem *ioaddr = hw->pcsr; - pr_info("\t----------------------------------------------\n" - "\t DWMAC 100 CSR (base addr = 0x%p)\n" - "\t----------------------------------------------\n", ioaddr); - pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, - readl(ioaddr + MAC_CONTROL)); - pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, - readl(ioaddr + MAC_ADDR_HIGH)); - pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, - readl(ioaddr + MAC_ADDR_LOW)); - pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", - MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); - pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", - MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); - pr_info("\tflow control (offset 0x%x): 0x%08x\n", - MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); - pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, - readl(ioaddr + MAC_VLAN1)); - pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, - readl(ioaddr + MAC_VLAN2)); + + reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL); + reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH); + reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW); + reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH); + reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW); + reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL); + reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1); + reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2); } static int dwmac100_rx_ipc_enable(struct mac_device_info *hw) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index e5664da382f3..eef2f222ce9a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -18,10 +18,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -70,19 +66,18 @@ static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, writel(csr6, ioaddr + DMA_CONTROL); } -static void dwmac100_dump_dma_regs(void __iomem *ioaddr) +static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - pr_debug("DWMAC 100 DMA CSR\n"); for (i = 0; i < 9; i++) - pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, - (DMA_BUS_MODE + i * 4), - readl(ioaddr + DMA_BUS_MODE + i * 4)); + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); - pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n", - DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR), - DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); + reg_space[DMA_CUR_TX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_TX_BUF_ADDR); + reg_space[DMA_CUR_RX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_RX_BUF_ADDR); } /* DMA controller has two counters to track the number of the missed frames. 
*/ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 3e8d4fefa5e0..db45134fddf0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -22,6 +22,7 @@ #define GMAC_HASH_TAB_32_63 0x00000014 #define GMAC_RX_FLOW_CTRL 0x00000090 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_RXQ_CTRL0 0x000000a0 #define GMAC_INT_STATUS 0x000000b0 #define GMAC_INT_EN 0x000000b4 #define GMAC_PCS_BASE 0x000000e0 @@ -44,6 +45,11 @@ #define GMAC_MAX_PERFECT_ADDRESSES 128 +/* MAC RX Queue Enable */ +#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) +#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) +#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1) + /* MAC Flow Control RX */ #define GMAC_RX_FLOW_CTRL_RFE BIT(0) @@ -84,6 +90,19 @@ enum power_event { power_down = 0x00000001, }; +/* Energy Efficient Ethernet (EEE) for GMAC4 + * + * LPI status, timer and control register offset + */ +#define GMAC4_LPI_CTRL_STATUS 0xd0 +#define GMAC4_LPI_TIMER_CTRL 0xd4 + +/* LPI control and status defines */ +#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ +#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ +#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ +#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ + /* MAC Debug bitmap */ #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) #define GMAC_DEBUG_TFCSTS_SHIFT 17 @@ -133,6 +152,8 @@ enum power_event { /* MAC HW features2 bitmap */ #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) +#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) +#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0) /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index eaed7cb21867..1e79e6529c4a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -59,19 +59,24 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu) writel(value, ioaddr + GMAC_INT_EN); } -static void dwmac4_dump_regs(struct mac_device_info *hw) +static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) { void __iomem *ioaddr = hw->pcsr; - int i; + u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); - pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr); + value &= GMAC_RX_QUEUE_CLEAR(queue); + value |= GMAC_RX_AV_QUEUE_ENABLE(queue); - for (i = 0; i < GMAC_REG_NUM; i++) { - int offset = i * 4; + writel(value, ioaddr + GMAC_RXQ_CTRL0); +} - pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i, - offset, readl(ioaddr + offset)); - } +static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < GMAC_REG_NUM; i++) + reg_space[i] = readl(ioaddr + i * 4); } static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) @@ -126,6 +131,65 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw, GMAC_ADDR_LOW(reg_n)); } +static void dwmac4_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. 
+ */ + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + if (en_tx_lpi_clockgating) + value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (link) + value |= GMAC4_LPI_CTRL_STATUS_PLS; + else + value &= ~GMAC4_LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. + */ + writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); +} + static void dwmac4_set_filter(struct mac_device_info *hw, struct net_device *dev) { @@ -392,12 +456,17 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) static const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, .dump_regs = dwmac4_dump_regs, .host_irq_status = dwmac4_irq_status, .flow_ctrl = dwmac4_flow_ctrl, .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 8816515e1bbb..843ec69222ea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -103,7 +103,7 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, x->rx_mii++; if (unlikely(rdes3 & RDES3_CRC_ERROR)) { - x->rx_crc++; + x->rx_crc_errors++; stats->rx_crc_errors++; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 8196ab5fc33c..f97b0d5d9987 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -127,53 +127,51 @@ static void dwmac4_dma_init(void __iomem *ioaddr, dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i); } -static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel) +static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, + u32 *reg_space) { - pr_debug(" Channel %d\n", channel); - pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0, - readl(ioaddr + DMA_CHAN_CONTROL(channel))); - pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4, - readl(ioaddr + DMA_CHAN_TX_CONTROL(channel))); - pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8, 
- readl(ioaddr + DMA_CHAN_RX_CONTROL(channel))); - pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14, - readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel))); - pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c, - readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel))); - pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20, - readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel))); - pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28, - readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel))); - pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c, - readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel))); - pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30, - readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel))); - pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34, - readl(ioaddr + DMA_CHAN_INTR_ENA(channel))); - pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38, - readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel))); - pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c, - readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel))); - pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44, - readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel))); - pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c, - readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel))); - pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54, - readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel))); - pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c, - readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel))); - pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60, - readl(ioaddr + DMA_CHAN_STATUS(channel))); + reg_space[DMA_CHAN_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_CONTROL(channel)); + reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); + reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); + reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)); + reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)); + reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)); + reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)); + reg_space[DMA_CHAN_INTR_ENA(channel) / 4] = + readl(ioaddr + DMA_CHAN_INTR_ENA(channel)); + reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)); + reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)); + reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)); + reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)); + reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_STATUS(channel)); } -static void dwmac4_dump_dma_regs(void __iomem *ioaddr) +static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - 
pr_debug(" GMAC4 DMA registers\n"); - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) - _dwmac4_dump_dma_regs(ioaddr, i); + _dwmac4_dump_dma_regs(ioaddr, i, reg_space); } static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) @@ -303,6 +301,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1; dma_cap->number_tx_channel = ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1; + /* TX and RX number of queues */ + dma_cap->number_rx_queues = + ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; + dma_cap->number_tx_queues = + ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 726d9d9aaf83..56e485f79077 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 84e3e84cec7d..e60bfca2a763 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -10,10 +10,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
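dwmac4_rx_queue_enable() and the new GMAC_RX_*_QUEUE_ENABLE macros above assume a two-bit field per RX queue in GMAC_RXQ_CTRL0 (01 = routed as AV traffic, 10 = routed as DCB/generic traffic), while the available queue counts are read out of the HW feature register through the new GMAC_HW_FEAT_RXQCNT/TXQCNT masks. A small stand-alone sketch of the per-queue encoding, with illustrative macro names and demo values:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)			(1U << (nr))
#define GENMASK(h, l)		(((~0U) << (l)) & (~0U >> (31 - (h))))

#define RX_QUEUE_CLEAR(q)	(~(GENMASK(1, 0) << ((q) * 2)))
#define RX_AV_QUEUE_ENABLE(q)	BIT((q) * 2)
#define RX_DCB_QUEUE_ENABLE(q)	BIT(((q) * 2) + 1)

int main(void)
{
	uint32_t ctrl = 0;

	/* enable queue 0 as AV, queue 2 as DCB, leave the other fields alone */
	ctrl = (ctrl & RX_QUEUE_CLEAR(0)) | RX_AV_QUEUE_ENABLE(0);
	ctrl = (ctrl & RX_QUEUE_CLEAR(2)) | RX_DCB_QUEUE_ENABLE(2);

	printf("RXQ_CTRL0 = 0x%08x\n", ctrl);	/* prints 0x00000021 */
	return 0;
}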
@@ -21,6 +17,7 @@ *******************************************************************************/ #include <linux/io.h> +#include <linux/iopoll.h> #include "common.h" #include "dwmac_dma.h" @@ -29,19 +26,16 @@ int dwmac_dma_reset(void __iomem *ioaddr) { u32 value = readl(ioaddr + DMA_BUS_MODE); - int limit; + int err; /* DMA SW reset */ value |= DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + DMA_BUS_MODE); - limit = 10; - while (limit--) { - if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) - break; - mdelay(10); - } - if (limit < 0) + err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 100000, 10000); + if (err) return -EBUSY; return 0; @@ -102,7 +96,7 @@ static void show_tx_process_state(unsigned int status) pr_debug("- TX (Stopped): Reset or Stop command\n"); break; case 1: - pr_debug("- TX (Running):Fetching the Tx desc\n"); + pr_debug("- TX (Running): Fetching the Tx desc\n"); break; case 2: pr_debug("- TX (Running): Waiting for end of tx\n"); @@ -136,7 +130,7 @@ static void show_rx_process_state(unsigned int status) pr_debug("- RX (Running): Fetching the Rx desc\n"); break; case 2: - pr_debug("- RX (Running):Checking for end of pkt\n"); + pr_debug("- RX (Running): Checking for end of pkt\n"); break; case 3: pr_debug("- RX (Running): Waiting for Rx pkt\n"); @@ -246,7 +240,7 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned long data; data = (addr[5] << 8) | addr[4]; - /* For MAC Addr registers se have to set the Address Enable (AE) + /* For MAC Addr registers we have to set the Address Enable (AE) * bit that has no effect on the High Reg 0 where the bit 31 (MO) * is RO. */ @@ -261,9 +255,9 @@ void stmmac_set_mac(void __iomem *ioaddr, bool enable) u32 value = readl(ioaddr + MAC_CTRL_REG); if (enable) - value |= MAC_RNABLE_RX | MAC_ENABLE_TX; + value |= MAC_ENABLE_RX | MAC_ENABLE_TX; else - value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); writel(value, ioaddr + MAC_CTRL_REG); } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index f0d86321dfe2..323b59ec74a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -225,7 +221,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, x->rx_mii++; if (unlikely(rdes0 & RDES0_CRC_ERROR)) { - x->rx_crc++; + x->rx_crc_errors++; stats->rx_crc_errors++; } ret = discard_frame; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 38a1a5603293..c037326331f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
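The dwmac_dma_reset() rework above swaps the open-coded mdelay() loop for readl_poll_timeout() from <linux/iopoll.h>, which keeps re-reading the register into its 'val' argument until the given condition holds or the timeout expires, returning 0 on success or -ETIMEDOUT. A minimal kernel-style sketch of the pattern, assuming a generic busy bit (helper and register names are illustrative):

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/* wait for 'busy_bit' at 'ioaddr + reg' to clear:
 * poll every 100 us, give up after 10 ms
 */
static int wait_bit_clear(void __iomem *ioaddr, u32 reg, u32 busy_bit)
{
	u32 val;

	return readl_poll_timeout(ioaddr + reg, val, !(val & busy_bit),
				  100, 10000);
}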
- The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index ce9aa792857b..e9b04c28980f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index fd78406e2e9a..efb818ebd55e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -115,7 +111,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, stats->collisions++; } if (unlikely(rdes0 & RDES0_CRC_ERROR)) { - x->rx_crc++; + x->rx_crc_errors++; stats->rx_crc_errors++; } ret = discard_frame; diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 9983ce9bd90d..452f256ff03f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -16,10 +16,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index eab04aeeeb95..cd8fb619b1e9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -10,10 +10,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -106,9 +102,6 @@ struct stmmac_priv { u32 msg_enable; int wolopts; int wol_irq; - struct clk *stmmac_clk; - struct clk *pclk; - struct reset_control *stmmac_rst; int clk_csr; struct timer_list eee_ctrl_timer; int lpi_irq; @@ -120,8 +113,6 @@ struct stmmac_priv { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; - struct clk *clk_ptp_ref; - unsigned int clk_ptp_rate; u32 adv_ts; int use_riwt; int irq_wake; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 699ee1d30426..85d64114e159 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -65,7 +61,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(overflow_error), STMMAC_STAT(ipc_csum_error), STMMAC_STAT(rx_collision), - STMMAC_STAT(rx_crc), + STMMAC_STAT(rx_crc_errors), STMMAC_STAT(dribbling_bit), STMMAC_STAT(rx_length), STMMAC_STAT(rx_mii), @@ -439,32 +435,14 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) static void stmmac_ethtool_gregs(struct net_device *dev, struct ethtool_regs *regs, void *space) { - int i; u32 *reg_space = (u32 *) space; struct stmmac_priv *priv = netdev_priv(dev); memset(reg_space, 0x0, REG_SPACE_SIZE); - if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) { - /* MAC registers */ - for (i = 0; i < 12; i++) - reg_space[i] = readl(priv->ioaddr + (i * 4)); - /* DMA registers */ - for (i = 0; i < 9; i++) - reg_space[i + 12] = - readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); - reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR); - reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR); - } else { - /* MAC registers */ - for (i = 0; i < 55; i++) - reg_space[i] = readl(priv->ioaddr + (i * 4)); - /* DMA registers */ - for (i = 0; i < 22; i++) - reg_space[i + 55] = - readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); - } + priv->hw->mac->dump_regs(priv->hw, reg_space); + priv->hw->dma->dump_regs(priv->ioaddr, reg_space); } static void @@ -712,7 +690,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { - unsigned long clk = clk_get_rate(priv->stmmac_clk); + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); if (!clk) return 0; @@ -722,7 +700,7 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { - unsigned long clk = clk_get_rate(priv->stmmac_clk); + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); if (!clk) return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 10d6059b2f26..721b61655261 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
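stmmac_tx_avail()/stmmac_rx_dirty() above only change their local variable type, but the surrounding arithmetic is the classic one-slot-reserved ring-occupancy calculation on producer/consumer indices that already wrap at the ring size. A small sketch of that arithmetic with an illustrative ring size; the wrapped branch follows the usual stmmac formula, which sits outside the visible hunk:

#include <stdint.h>
#include <stdio.h>

/* cur = producer index, dirty = consumer index, both in [0, size);
 * one slot stays unused so a full ring is distinguishable from an
 * empty one
 */
static uint32_t ring_avail(uint32_t cur, uint32_t dirty, uint32_t size)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return size - cur + dirty - 1;
}

int main(void)
{
	/* 256-entry ring: producer at 250, consumer at 10 -> 15 free slots */
	printf("%u\n", ring_avail(250, 10, 256));
	/* just after reset: both at 0 -> 255 free slots */
	printf("%u\n", ring_avail(0, 0, 256));
	return 0;
}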
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e3f6389e1b01..4498a3861aa3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -13,10 +13,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -158,7 +154,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { u32 clk_rate; - clk_rate = clk_get_rate(priv->stmmac_clk); + clk_rate = clk_get_rate(priv->plat->stmmac_clk); /* Platform provided default clk_csr would be assumed valid * for all other cases except for the below mentioned ones. @@ -191,7 +187,7 @@ static void print_pkt(unsigned char *buf, int len) static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) { - unsigned avail; + u32 avail; if (priv->dirty_tx > priv->cur_tx) avail = priv->dirty_tx - priv->cur_tx - 1; @@ -203,7 +199,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) { - unsigned dirty; + u32 dirty; if (priv->dirty_rx <= priv->cur_rx) dirty = priv->cur_rx - priv->dirty_rx; @@ -216,7 +212,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) /** * stmmac_hw_fix_mac_speed - callback for speed selection * @priv: driver private structure - * Description: on some platforms (e.g. ST), some HW system configuraton + * Description: on some platforms (e.g. ST), some HW system configuration * registers have to be set according to the link speed negotiated. */ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) @@ -239,7 +235,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) /* Check and enter in LPI mode */ if ((priv->dirty_tx == priv->cur_tx) && (priv->tx_path_in_lpi_mode == false)) - priv->hw->mac->set_eee_mode(priv->hw); + priv->hw->mac->set_eee_mode(priv->hw, + priv->plat->en_tx_lpi_clockgating); } /** @@ -415,7 +412,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, /** * stmmac_hwtstamp_ioctl - control hardware timestamping. * @dev: device pointer. - * @ifr: An IOCTL specefic structure, that can contain a pointer to + * @ifr: An IOCTL specific structure, that can contain a pointer to * a proprietary structure used to pass information to the driver. 
* Description: * This function configures the MAC to enable/disable both outgoing(TX) @@ -606,7 +603,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* program Sub Second Increment reg */ sec_inc = priv->hw->ptp->config_sub_second_increment( - priv->ptpaddr, priv->clk_ptp_rate, + priv->ptpaddr, priv->plat->clk_ptp_rate, priv->plat->has_gmac4); temp = div_u64(1000000000ULL, sec_inc); @@ -616,7 +613,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) * where, freq_div_ratio = 1e9ns/sec_inc */ temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->clk_ptp_rate); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); priv->hw->ptp->config_addend(priv->ptpaddr, priv->default_addend); @@ -644,18 +641,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - /* Fall-back to main clock in case of no PTP ref is passed */ - priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); - if (IS_ERR(priv->clk_ptp_ref)) { - priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); - priv->clk_ptp_ref = NULL; - netdev_dbg(priv->dev, "PTP uses main clock\n"); - } else { - clk_prepare_enable(priv->clk_ptp_ref); - priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); - netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate); - } - priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x core */ if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) @@ -682,8 +667,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) static void stmmac_release_ptp(struct stmmac_priv *priv) { - if (priv->clk_ptp_ref) - clk_disable_unprepare(priv->clk_ptp_ref); + if (priv->plat->clk_ptp_ref) + clk_disable_unprepare(priv->plat->clk_ptp_ref); stmmac_ptp_unregister(priv); } @@ -704,7 +689,7 @@ static void stmmac_adjust_link(struct net_device *dev) int new_state = 0; unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; - if (phydev == NULL) + if (!phydev) return; spin_lock_irqsave(&priv->lock, flags); @@ -731,33 +716,36 @@ static void stmmac_adjust_link(struct net_device *dev) new_state = 1; switch (phydev->speed) { case 1000: - if (likely((priv->plat->has_gmac) || - (priv->plat->has_gmac4))) + if (priv->plat->has_gmac || + priv->plat->has_gmac4) ctrl &= ~priv->hw->link.port; - stmmac_hw_fix_mac_speed(priv); break; case 100: + if (priv->plat->has_gmac || + priv->plat->has_gmac4) { + ctrl |= priv->hw->link.port; + ctrl |= priv->hw->link.speed; + } else { + ctrl &= ~priv->hw->link.port; + } + break; case 10: - if (likely((priv->plat->has_gmac) || - (priv->plat->has_gmac4))) { + if (priv->plat->has_gmac || + priv->plat->has_gmac4) { ctrl |= priv->hw->link.port; - if (phydev->speed == SPEED_100) { - ctrl |= priv->hw->link.speed; - } else { - ctrl &= ~(priv->hw->link.speed); - } + ctrl &= ~(priv->hw->link.speed); } else { ctrl &= ~priv->hw->link.port; } - stmmac_hw_fix_mac_speed(priv); break; default: netif_warn(priv, link, priv->dev, - "Speed (%d) not 10/100\n", - phydev->speed); + "broken speed: %d\n", phydev->speed); + phydev->speed = SPEED_UNKNOWN; break; } - + if (phydev->speed != SPEED_UNKNOWN) + stmmac_hw_fix_mac_speed(priv); priv->speed = phydev->speed; } @@ -770,8 +758,8 @@ static void stmmac_adjust_link(struct net_device *dev) } else if (priv->oldlink) { new_state = 1; priv->oldlink = 0; - priv->speed = 0; - priv->oldduplex = -1; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; } if (new_state && 
netif_msg_link(priv)) @@ -833,8 +821,8 @@ static int stmmac_init_phy(struct net_device *dev) int interface = priv->plat->interface; int max_speed = priv->plat->max_speed; priv->oldlink = 0; - priv->speed = 0; - priv->oldduplex = -1; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; if (priv->plat->phy_node) { phydev = of_phy_connect(dev, priv->plat->phy_node, @@ -886,9 +874,7 @@ static int stmmac_init_phy(struct net_device *dev) if (phydev->is_pseudo_fixed_link) phydev->irq = PHY_POLL; - netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n", - __func__, phydev->phy_id, phydev->link); - + phy_attached_info(phydev); return 0; } @@ -1014,7 +1000,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) * @dev: net device structure * @flags: gfp flag. * Description: this function initializes the DMA RX/TX descriptors - * and allocates the socket buffers. It suppors the chained and ring + * and allocates the socket buffers. It supports the chained and ring * modes. */ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) @@ -1127,13 +1113,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) int i; for (i = 0; i < DMA_TX_SIZE; i++) { - struct dma_desc *p; - - if (priv->extend_desc) - p = &((priv->dma_etx + i)->basic); - else - p = priv->dma_tx + i; - if (priv->tx_skbuff_dma[i].buf) { if (priv->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, @@ -1147,7 +1126,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) DMA_TO_DEVICE); } - if (priv->tx_skbuff[i] != NULL) { + if (priv->tx_skbuff[i]) { dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; priv->tx_skbuff_dma[i].buf = 0; @@ -1271,6 +1250,28 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) } /** + * stmmac_mac_enable_rx_queues - Enable MAC rx queues + * @priv: driver private structure + * Description: It is used for enabling the rx queues in the MAC + */ +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) +{ + int rx_count = priv->dma_cap.number_rx_queues; + int queue = 0; + + /* If GMAC does not have multiple queues, then this is not necessary*/ + if (rx_count == 1) + return; + + /** + * If the core is synthesized with multiple rx queues / multiple + * dma channels, then rx queues will be disabled by default. + * For now only rx queue 0 is enabled. + */ + priv->hw->mac->rx_queue_enable(priv->hw, queue); +} + +/** * stmmac_dma_operation_mode - HW DMA operation mode * @priv: driver private structure * Description: it is used for configuring the DMA operation mode register in @@ -1671,10 +1672,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Copy the MAC addr into the HW */ priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); - /* If required, perform hw setup of the bus. 
*/ - if (priv->plat->bus_setup) - priv->plat->bus_setup(priv->ioaddr); - /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { int speed = priv->plat->mac_port_sel_speed; @@ -1691,6 +1688,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); + /* Initialize MAC RX Queues */ + if (priv->hw->mac->rx_queue_enable) + stmmac_mac_enable_rx_queues(priv); + ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); @@ -1711,8 +1712,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (init_ptp) { ret = stmmac_init_ptp(priv); - if (ret) - netdev_warn(priv->dev, "fail to init PTP.\n"); + if (ret == -EOPNOTSUPP) + netdev_warn(priv->dev, "PTP not supported by HW\n"); + else if (ret) + netdev_warn(priv->dev, "PTP init failed\n"); } #ifdef CONFIG_DEBUG_FS @@ -1726,11 +1729,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); - /* Dump DMA/MAC registers */ - if (netif_msg_hw(priv)) { - priv->hw->mac->dump_regs(priv->hw); - priv->hw->dma->dump_regs(priv->ioaddr); - } priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { @@ -2519,7 +2517,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) if (unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; if (priv->hwts_rx_en && !priv->extend_desc) { - /* DESC2 & DESC3 will be overwitten by device + /* DESC2 & DESC3 will be overwritten by device * with timestamp value, hence reinitialize * them in stmmac_rx_refill() function so that * device can reuse it. @@ -2542,7 +2540,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) frame_len = priv->hw->desc->get_rx_frame_len(p, coe); - /* If frame length is greather than skb buffer size + /* If frame length is greater than skb buffer size * (preallocated during init) then the packet is * ignored */ @@ -2669,7 +2667,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget) work_done = stmmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); stmmac_enable_dma_irq(priv); } return work_done; @@ -2748,7 +2746,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames * (due to limited buffer sizes). In this case we disable - * the TX csum insertionin the TDES and not use SF. + * the TX csum insertion in the TDES and not use SF. */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_CSUM_MASK; @@ -2894,9 +2892,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, struct dma_desc *p = (struct dma_desc *)head; for (i = 0; i < size; i++) { - u64 x; if (extend_desc) { - x = *(u64 *) ep; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(ep->basic.des0), @@ -2905,7 +2901,6 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, le32_to_cpu(ep->basic.des3)); ep++; } else { - x = *(u64 *) p; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(p->des0), le32_to_cpu(p->des1), @@ -2975,7 +2970,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) (priv->dma_cap.hash_filter) ? 
"Y" : "N"); seq_printf(seq, "\tMultiple MAC address registers: %s\n", (priv->dma_cap.multi_addr) ? "Y" : "N"); - seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n", + seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", (priv->dma_cap.pcs) ? "Y" : "N"); seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", (priv->dma_cap.sma_mdio) ? "Y" : "N"); @@ -3251,44 +3246,8 @@ int stmmac_dvr_probe(struct device *device, if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; - priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); - if (IS_ERR(priv->stmmac_clk)) { - netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n", - __func__); - /* If failed to obtain stmmac_clk and specific clk_csr value - * is NOT passed from the platform, probe fail. - */ - if (!priv->plat->clk_csr) { - ret = PTR_ERR(priv->stmmac_clk); - goto error_clk_get; - } else { - priv->stmmac_clk = NULL; - } - } - clk_prepare_enable(priv->stmmac_clk); - - priv->pclk = devm_clk_get(priv->device, "pclk"); - if (IS_ERR(priv->pclk)) { - if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto error_pclk_get; - } - priv->pclk = NULL; - } - clk_prepare_enable(priv->pclk); - - priv->stmmac_rst = devm_reset_control_get(priv->device, - STMMAC_RESOURCE_NAME); - if (IS_ERR(priv->stmmac_rst)) { - if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto error_hw_init; - } - dev_info(priv->device, "no reset control found\n"); - priv->stmmac_rst = NULL; - } - if (priv->stmmac_rst) - reset_control_deassert(priv->stmmac_rst); + if (priv->plat->stmmac_rst) + reset_control_deassert(priv->plat->stmmac_rst); /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); @@ -3391,10 +3350,6 @@ error_netdev_register: error_mdio_register: netif_napi_del(&priv->napi); error_hw_init: - clk_disable_unprepare(priv->pclk); -error_pclk_get: - clk_disable_unprepare(priv->stmmac_clk); -error_clk_get: free_netdev(ndev); return ret; @@ -3420,10 +3375,10 @@ int stmmac_dvr_remove(struct device *dev) stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); - if (priv->stmmac_rst) - reset_control_assert(priv->stmmac_rst); - clk_disable_unprepare(priv->pclk); - clk_disable_unprepare(priv->stmmac_clk); + if (priv->plat->stmmac_rst) + reset_control_assert(priv->plat->stmmac_rst); + clk_disable_unprepare(priv->plat->pclk); + clk_disable_unprepare(priv->plat->stmmac_clk); if (priv->hw->pcs != STMMAC_PCS_RGMII && priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) @@ -3472,14 +3427,14 @@ int stmmac_suspend(struct device *dev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ - clk_disable(priv->pclk); - clk_disable(priv->stmmac_clk); + clk_disable(priv->plat->pclk); + clk_disable(priv->plat->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); priv->oldlink = 0; - priv->speed = 0; - priv->oldduplex = -1; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; return 0; } EXPORT_SYMBOL_GPL(stmmac_suspend); @@ -3512,9 +3467,9 @@ int stmmac_resume(struct device *dev) priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); - /* enable the clk prevously disabled */ - clk_enable(priv->stmmac_clk); - clk_enable(priv->pclk); + /* enable the clk previously disabled */ + clk_enable(priv->plat->stmmac_clk); + clk_enable(priv->plat->pclk); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b0344c213752..db157a47000c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -13,10 +13,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -24,13 +20,14 @@ Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#include <linux/io.h> +#include <linux/iopoll.h> #include <linux/mii.h> -#include <linux/phy.h> -#include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_mdio.h> -#include <asm/io.h> +#include <linux/phy.h> +#include <linux/slab.h> #include "stmmac.h" @@ -42,22 +39,6 @@ #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) -static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) -{ - unsigned long curr; - unsigned long finish = jiffies + 3 * HZ; - - do { - curr = jiffies; - if (readl(ioaddr + mii_addr) & MII_BUSY) - cpu_relax(); - else - return 0; - } while (!time_after_eq(curr, finish)); - - return -EBUSY; -} - /** * stmmac_mdio_read * @bus: points to the mii_bus structure @@ -74,7 +55,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; - + u32 v; int data; u32 value = MII_BUSY; @@ -86,12 +67,14 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) if (priv->plat->has_gmac4) value |= MII_GMAC4_READ; - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) return -EBUSY; writel(value, priv->ioaddr + mii_address); - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) return -EBUSY; /* Read the data from the MII data register */ @@ -115,7 +98,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; - + u32 v; u32 value = MII_BUSY; value |= (phyaddr << priv->hw->mii.addr_shift) @@ -130,7 +113,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, value |= MII_WRITE; /* Wait until any existing MII operation is complete */ - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) return -EBUSY; /* Set the MII address register to write */ @@ -138,7 +122,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, writel(value, priv->ioaddr + mii_address); /* Wait until any existing MII operation is complete */ - return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); + return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000); } /** @@ -156,9 +141,9 @@ int stmmac_mdio_reset(struct mii_bus *bus) #ifdef 
CONFIG_OF if (priv->device->of_node) { - if (data->reset_gpio < 0) { struct device_node *np = priv->device->of_node; + if (!np) return 0; @@ -198,7 +183,7 @@ int stmmac_mdio_reset(struct mii_bus *bus) /* This is a workaround for problems with the STE101P PHY. * It doesn't complete its reset until at least one clock cycle - * on MDC, so perform a dummy mdio read. To be upadted for GMAC4 + * on MDC, so perform a dummy mdio read. To be updated for GMAC4 * if needed. */ if (!priv->plat->has_gmac4) @@ -225,7 +210,7 @@ int stmmac_mdio_register(struct net_device *ndev) return 0; new_bus = mdiobus_alloc(); - if (new_bus == NULL) + if (!new_bus) return -ENOMEM; if (mdio_bus_data->irqs) @@ -262,49 +247,48 @@ int stmmac_mdio_register(struct net_device *ndev) found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); - if (phydev) { - int act = 0; - char irq_num[4]; - char *irq_str; - - /* - * If an IRQ was provided to be assigned after - * the bus probe, do it here. - */ - if ((mdio_bus_data->irqs == NULL) && - (mdio_bus_data->probed_phy_irq > 0)) { - new_bus->irq[addr] = - mdio_bus_data->probed_phy_irq; - phydev->irq = mdio_bus_data->probed_phy_irq; - } - - /* - * If we're going to bind the MAC to this PHY bus, - * and no PHY number was provided to the MAC, - * use the one probed here. - */ - if (priv->plat->phy_addr == -1) - priv->plat->phy_addr = addr; - - act = (priv->plat->phy_addr == addr); - switch (phydev->irq) { - case PHY_POLL: - irq_str = "POLL"; - break; - case PHY_IGNORE_INTERRUPT: - irq_str = "IGNORE"; - break; - default: - sprintf(irq_num, "%d", phydev->irq); - irq_str = irq_num; - break; - } - netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", - phydev->phy_id, addr, - irq_str, phydev_name(phydev), - act ? " active" : ""); - found = 1; + int act = 0; + char irq_num[4]; + char *irq_str; + + if (!phydev) + continue; + + /* + * If an IRQ was provided to be assigned after + * the bus probe, do it here. + */ + if (!mdio_bus_data->irqs && + (mdio_bus_data->probed_phy_irq > 0)) { + new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; + phydev->irq = mdio_bus_data->probed_phy_irq; + } + + /* + * If we're going to bind the MAC to this PHY bus, + * and no PHY number was provided to the MAC, + * use the one probed here. + */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = addr; + + act = (priv->plat->phy_addr == addr); + switch (phydev->irq) { + case PHY_POLL: + irq_str = "POLL"; + break; + case PHY_IGNORE_INTERRUPT: + irq_str = "IGNORE"; + break; + default: + sprintf(irq_num, "%d", phydev->irq); + irq_str = irq_num; + break; } + netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", + phydev->phy_id, addr, irq_str, phydev_name(phydev), + act ? " active" : ""); + found = 1; } if (!found && !mdio_node) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 3da4737620cb..5c9e462276b9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
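The stmmac_mdio_read()/stmmac_mdio_write() changes above replace stmmac_mdio_busy_wait() with the same readl_poll_timeout() idiom while keeping the usual clause-22 sequence: wait for not-busy, write the command word (PHY address, register number, busy bit), wait for the controller to clear busy again, then fetch the 16-bit result from the data register. A kernel-style sketch of that sequence with an illustrative register layout and busy flag:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define MDIO_BUSY	BIT(0)		/* illustrative busy flag, bit 0 */

/* clause-22 style read: wait, issue command, wait, fetch 16-bit data */
static int mdio_c22_read(void __iomem *base, u32 addr_reg, u32 data_reg,
			 u32 cmd)
{
	u32 v;

	if (readl_poll_timeout(base + addr_reg, v, !(v & MDIO_BUSY),
			       100, 10000))
		return -EBUSY;

	writel(cmd | MDIO_BUSY, base + addr_reg);

	if (readl_poll_timeout(base + addr_reg, v, !(v & MDIO_BUSY),
			       100, 10000))
		return -EBUSY;

	return readl(base + data_reg) & GENMASK(15, 0);
}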
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 36942f5a6a53..433a84239a68 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -121,7 +117,6 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); - axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all"); axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); @@ -181,10 +176,19 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, mdio = false; } - /* If snps,dwmac-mdio is passed from DT, always register the MDIO */ - for_each_child_of_node(np, plat->mdio_node) { - if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio")) - break; + /* exception for dwmac-dwc-qos-eth glue logic */ + if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { + plat->mdio_node = of_get_child_by_name(np, "mdio"); + } else { + /** + * If snps,dwmac-mdio is passed from DT, always register + * the MDIO + */ + for_each_child_of_node(np, plat->mdio_node) { + if (of_device_is_compatible(plat->mdio_node, + "snps,dwmac-mdio")) + break; + } } if (plat->mdio_node) { @@ -249,6 +253,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); + plat->en_tx_lpi_clockgating = + of_property_read_bool(np, "snps,en-tx-lpi-clockgating"); + /* Set the maxmtu to a default of JUMBO_LEN in case the * parameter is not present in the device tree. 
*/ @@ -333,7 +340,54 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->axi = stmmac_axi_setup(pdev); + /* clock setup */ + plat->stmmac_clk = devm_clk_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Cannot get CSR clock\n"); + plat->stmmac_clk = NULL; + } + clk_prepare_enable(plat->stmmac_clk); + + plat->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(plat->pclk)) { + if (PTR_ERR(plat->pclk) == -EPROBE_DEFER) + goto error_pclk_get; + + plat->pclk = NULL; + } + clk_prepare_enable(plat->pclk); + + /* Fall-back to main clock in case of no PTP ref is passed */ + plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); + if (IS_ERR(plat->clk_ptp_ref)) { + plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + dev_warn(&pdev->dev, "PTP uses main clock\n"); + } else { + clk_prepare_enable(plat->clk_ptp_ref); + plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); + dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); + } + + plat->stmmac_rst = devm_reset_control_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_rst)) { + if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER) + goto error_hw_init; + + dev_info(&pdev->dev, "no reset control found\n"); + plat->stmmac_rst = NULL; + } + return plat; + +error_hw_init: + clk_disable_unprepare(plat->pclk); +error_pclk_get: + clk_disable_unprepare(plat->stmmac_clk); + + return ERR_PTR(-EPROBE_DEFER); } /** @@ -357,7 +411,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EINVAL); } void stmmac_remove_config_dt(struct platform_device *pdev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 3eb281d1db08..d71bd80c5b5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index c06938c47af5..48fb72fc423c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig index a4b40e3015e5..b2caf5132bd2 100644 --- a/drivers/net/ethernet/sun/Kconfig +++ b/drivers/net/ethernet/sun/Kconfig @@ -70,19 +70,23 @@ config CASSINI <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>. 
config SUNVNET_COMMON - bool + tristate "Common routines to support Sun Virtual Networking" depends on SUN_LDOMS - default y if SUN_LDOMS + default m config SUNVNET tristate "Sun Virtual Network support" + default m depends on SUN_LDOMS + depends on SUNVNET_COMMON ---help--- Support for virtual network devices under Sun Logical Domains. config LDMVSW tristate "Sun4v LDoms Virtual Switch support" + default m depends on SUN_LDOMS + depends on SUNVNET_COMMON ---help--- Support for virtual switch devices under Sun4v Logical Domains. This driver adds a network interface for every vsw-port node diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 335b87660638..89952deae47f 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -41,11 +41,11 @@ static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; #define DRV_MODULE_NAME "ldmvsw" -#define DRV_MODULE_VERSION "1.0" -#define DRV_MODULE_RELDATE "Jan 15, 2016" +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "February 3, 2017" static char version[] = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver"); MODULE_LICENSE("GPL"); @@ -234,8 +234,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], dev->ethtool_ops = &vsw_ethtool_ops; dev->watchdog_timeo = VSW_TX_TIMEOUT; - dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | - NETIF_F_HW_CSUM | NETIF_F_SG; + dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ @@ -259,11 +258,6 @@ static struct vio_driver_ops vsw_vio_ops = { .handshake_complete = sunvnet_handshake_complete_common, }; -static void print_version(void) -{ - printk_once(KERN_INFO "%s", version); -} - static const char *remote_macaddr_prop = "remote-mac-address"; static const char *id_prop = "id"; @@ -279,8 +273,6 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) const u64 *port_id; u64 handle; - print_version(); - hp = mdesc_grab(); rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); @@ -327,7 +319,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) port->vp = vp; port->dev = dev; port->switch_port = 1; - port->tso = true; + port->tso = false; /* no tso in vsw, misbehaves in bridge */ port->tsolen = 0; /* Mark the port as belonging to ldmvsw which directs the @@ -457,6 +449,7 @@ static struct vio_driver vsw_port_driver = { static int __init vsw_init(void) { + pr_info("%s\n", version); return vio_register_driver(&vsw_port_driver); } diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f90d1af6d390..57978056b336 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *napi, int budget) work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); niu_ldg_rearm(np, lp, 1); } return work_done; @@ -6294,8 +6294,8 @@ no_rings: stats->tx_errors = errors; } -static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void niu_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct niu *np = netdev_priv(dev); @@ -6303,8 +6303,6 @@ static struct rtnl_link_stats64 
*niu_get_stats(struct net_device *dev, niu_get_rx_stats(np, stats); niu_get_tx_stats(np, stats); } - - return stats; } static void niu_load_hash_xmac(struct niu *np, u16 *hash) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index d277e4107976..5c5952e782cd 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *napi, int budget) gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); - napi_complete(napi); + napi_complete_done(napi, work_done); gem_enable_ints(gp); return work_done; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 5356a7074796..4cc2571f71c6 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -38,11 +38,11 @@ #define VNET_TX_TIMEOUT (5 * HZ) #define DRV_MODULE_NAME "sunvnet" -#define DRV_MODULE_VERSION "1.0" -#define DRV_MODULE_RELDATE "June 25, 2007" +#define DRV_MODULE_VERSION "2.0" +#define DRV_MODULE_RELDATE "February 3, 2017" static char version[] = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); @@ -303,11 +303,6 @@ static struct vio_driver_ops vnet_vio_ops = { .handshake_complete = sunvnet_handshake_complete_common, }; -static void print_version(void) -{ - printk_once(KERN_INFO "%s", version); -} - const char *remote_macaddr_prop = "remote-mac-address"; static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) @@ -319,8 +314,6 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) const u64 *rmac; int len, i, err, switch_port; - print_version(); - hp = mdesc_grab(); vp = vnet_find_parent(hp, vdev->mp, vdev); @@ -446,6 +439,7 @@ static struct vio_driver vnet_port_driver = { static int __init vnet_init(void) { + pr_info("%s\n", version); return vio_register_driver(&vnet_port_driver); } diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 8878b75d68b4..fa2d11ca9b81 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -37,6 +37,11 @@ */ #define VNET_MAX_RETRIES 10 +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Sun LDOM virtual network support library"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.1"); + static int __vnet_tx_trigger(struct vnet_port *port, u32 start); static void vnet_port_reset(struct vnet_port *port); @@ -181,6 +186,7 @@ static int handle_attr_info(struct vio_driver_state *vio, } else { pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; pkt->ipv4_lso_maxlen = 0; + port->tsolen = 0; } /* for version >= 1.6, ACK packet mode we support */ @@ -714,12 +720,8 @@ static void maybe_tx_wakeup(struct vnet_port *port) txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), port->q_index); __netif_tx_lock(txq, smp_processor_id()); - if (likely(netif_tx_queue_stopped(txq))) { - struct vio_dring_state *dr; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + if (likely(netif_tx_queue_stopped(txq))) netif_tx_wake_queue(txq); - } __netif_tx_unlock(txq); } @@ -737,41 +739,37 @@ static int vnet_event_napi(struct vnet_port *port, int budget) struct vio_driver_state *vio = &port->vio; int tx_wakeup, err; int npkts = 0; - int event = (port->rx_event & LDC_EVENT_RESET); - -ldc_ctrl: - if (unlikely(event == LDC_EVENT_RESET || - event == LDC_EVENT_UP)) { - vio_link_state_change(vio, event); - - if (event == LDC_EVENT_RESET) { - vnet_port_reset(port); - vio_port_up(vio); - - /* If the device is running but its tx queue was - * stopped (due to flow control), restart it. - * This is necessary since vnet_port_reset() - * clears the tx drings and thus we may never get - * back a VIO_TYPE_DATA ACK packet - which is - * the normal mechanism to restart the tx queue. - */ - if (netif_running(dev)) - maybe_tx_wakeup(port); - } + + /* we don't expect any other bits */ + BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY | + LDC_EVENT_RESET | + LDC_EVENT_UP)); + + /* RESET takes precedent over any other event */ + if (port->rx_event & LDC_EVENT_RESET) { + vio_link_state_change(vio, LDC_EVENT_RESET); + vnet_port_reset(port); + vio_port_up(vio); + + /* If the device is running but its tx queue was + * stopped (due to flow control), restart it. + * This is necessary since vnet_port_reset() + * clears the tx drings and thus we may never get + * back a VIO_TYPE_DATA ACK packet - which is + * the normal mechanism to restart the tx queue. + */ + if (netif_running(dev)) + maybe_tx_wakeup(port); + port->rx_event = 0; return 0; } - /* We may have multiple LDC events in rx_event. 
Unroll send_events() */ - event = (port->rx_event & LDC_EVENT_UP); - port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP); - if (event == LDC_EVENT_UP) - goto ldc_ctrl; - event = port->rx_event; - if (!(event & LDC_EVENT_DATA_READY)) - return 0; - /* we dont expect any other bits than RESET, UP, DATA_READY */ - BUG_ON(event != LDC_EVENT_DATA_READY); + if (port->rx_event & LDC_EVENT_UP) { + vio_link_state_change(vio, LDC_EVENT_UP); + port->rx_event = 0; + return 0; + } err = 0; tx_wakeup = 0; @@ -794,25 +792,25 @@ ldc_ctrl: pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); pkt->end_idx = -1; - goto napi_resume; - } - err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); - if (unlikely(err < 0)) { - if (err == -ECONNRESET) - vio_conn_reset(vio); - break; + } else { + err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + vio_conn_reset(vio); + break; + } + if (err == 0) + break; + viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", + msgbuf.tag.type, + msgbuf.tag.stype, + msgbuf.tag.stype_env, + msgbuf.tag.sid); + err = vio_validate_sid(vio, &msgbuf.tag); + if (err < 0) + break; } - if (err == 0) - break; - viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", - msgbuf.tag.type, - msgbuf.tag.stype, - msgbuf.tag.stype_env, - msgbuf.tag.sid); - err = vio_validate_sid(vio, &msgbuf.tag); - if (err < 0) - break; -napi_resume: + if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { if (!sunvnet_port_is_up_common(port)) { @@ -860,7 +858,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget) int processed = vnet_event_napi(port, budget); if (processed < budget) { - napi_complete(napi); + napi_complete_done(napi, processed); port->rx_event &= ~LDC_EVENT_DATA_READY; vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); } @@ -1256,10 +1254,8 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, rcu_read_lock(); port = vnet_tx_port(skb, dev); - if (unlikely(!port)) { - rcu_read_unlock(); + if (unlikely(!port)) goto out_dropped; - } if (skb_is_gso(skb) && skb->len > port->tsolen) { err = vnet_handle_offloads(port, skb, vnet_tx_port); @@ -1284,7 +1280,6 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, fl4.saddr = ip_hdr(skb)->saddr; rt = ip_route_output_key(dev_net(dev), &fl4); - rcu_read_unlock(); if (!IS_ERR(rt)) { skb_dst_set(skb, &rt->dst); icmp_send(skb, ICMP_DEST_UNREACH, @@ -1426,6 +1421,7 @@ ldc_start_done: dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { netif_tx_stop_queue(txq); + smp_rmb(); if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) netif_tx_wake_queue(txq); } @@ -1443,8 +1439,7 @@ out_dropped: jiffies + VNET_CLEAN_TIMEOUT); else if (port) del_timer(&port->clean_timer); - if (port) - rcu_read_unlock(); + rcu_read_unlock(); if (skb) dev_kfree_skb(skb); vnet_free_skbs(freeskbs); @@ -1641,7 +1636,7 @@ static void vnet_port_reset(struct vnet_port *port) del_timer(&port->clean_timer); sunvnet_port_free_tx_bufs_common(port); port->rmtu = 0; - port->tso = true; + port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */ port->tsolen = 0; } diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig deleted file mode 100644 index 8276ee5a7d54..000000000000 --- a/drivers/net/ethernet/synopsys/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ -# -# Synopsys network device configuration -# - -config NET_VENDOR_SYNOPSYS - bool "Synopsys devices" - default y - ---help--- - If you 
have a network (Ethernet) device belonging to this class, say Y. - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about Synopsys devices. If you say Y, you will be asked - for your specific device in the following questions. - -if NET_VENDOR_SYNOPSYS - -config SYNOPSYS_DWC_ETH_QOS - tristate "Sypnopsys DWC Ethernet QOS v4.10a support" - select PHYLIB - select CRC32 - select MII - depends on OF && HAS_DMA - ---help--- - This driver supports the DWC Ethernet QoS from Synopsys - -endif # NET_VENDOR_SYNOPSYS diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile deleted file mode 100644 index 7a375723fc18..000000000000 --- a/drivers/net/ethernet/synopsys/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Synopsys network device drivers. -# - -obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c deleted file mode 100644 index 09f5a67da35e..000000000000 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ /dev/null @@ -1,2998 +0,0 @@ -/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver - * - * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC). - * This version introduced a lot of changes which breaks backwards - * compatibility the non-QoS IP from Synopsys (used in the ST Micro drivers). - * Some fields differ between version 4.00a and 4.10a, mainly the interrupt - * bit fields. The driver could be made compatible with 4.00, if all relevant - * HW erratas are handled. - * - * The GMAC is highly configurable at synthesis time. This driver has been - * developed for a subset of the total available feature set. Currently - * it supports: - * - TSO - * - Checksum offload for RX and TX. - * - Energy efficient ethernet. - * - GMII phy interface. - * - The statistics module. - * - Single RX and TX queue. - * - * Copyright (C) 2015 Axis Communications AB. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- */ - -#include <linux/clk.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/ethtool.h> -#include <linux/stat.h> -#include <linux/types.h> - -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/platform_device.h> - -#include <linux/phy.h> -#include <linux/mii.h> -#include <linux/dma-mapping.h> -#include <linux/vmalloc.h> - -#include <linux/device.h> -#include <linux/bitrev.h> -#include <linux/crc32.h> - -#include <linux/of.h> -#include <linux/interrupt.h> -#include <linux/clocksource.h> -#include <linux/net_tstamp.h> -#include <linux/pm_runtime.h> -#include <linux/of_net.h> -#include <linux/of_address.h> -#include <linux/of_mdio.h> -#include <linux/timer.h> -#include <linux/tcp.h> - -#define DRIVER_NAME "dwceqos" -#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver" -#define DRIVER_VERSION "0.9" - -#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) - -#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */ - -#define DWCEQOS_LPI_TIMER_MIN 8 -#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1) - -#define DWCEQOS_RX_BUF_SIZE 2048 - -#define DWCEQOS_RX_DCNT 256 -#define DWCEQOS_TX_DCNT 256 - -#define DWCEQOS_HASH_TABLE_SIZE 64 - -/* The size field in the DMA descriptor is 14 bits */ -#define BYTES_PER_DMA_DESC 16376 - -/* Hardware registers */ -#define START_MAC_REG_OFFSET 0x0000 -#define MAX_MAC_REG_OFFSET 0x0bd0 -#define START_MTL_REG_OFFSET 0x0c00 -#define MAX_MTL_REG_OFFSET 0x0d7c -#define START_DMA_REG_OFFSET 0x1000 -#define MAX_DMA_REG_OFFSET 0x117C - -#define REG_SPACE_SIZE 0x1800 - -/* DMA */ -#define REG_DWCEQOS_DMA_MODE 0x1000 -#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004 -#define REG_DWCEQOS_DMA_IS 0x1008 -#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c - -/* DMA channel registers */ -#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100 -#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104 -#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114 -#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c -#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120 -#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c -#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130 -#define REG_DWCEQOS_DMA_CH0_IE 0x1134 -#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144 -#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c -#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154 -#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c -#define REG_DWCEQOS_DMA_CH0_STA 0x1160 - -#define DWCEQOS_DMA_MODE_TXPR BIT(11) -#define DWCEQOS_DMA_MODE_DA BIT(1) - -#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31) -#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0) -#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12) - -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \ - (((x) << 16) & 0x000F0000) -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16) - -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \ - (((x) << 24) & 0x0F000000) -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24) - -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \ - (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1) - -#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16) -#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 
18) - -#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16) -#define DWCEQOS_DMA_CH_CTRL_START BIT(0) -#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1) -#define DWCEQOS_DMA_CH_TX_OSP BIT(4) -#define DWCEQOS_DMA_CH_TX_TSE BIT(12) - -#define DWCEQOS_DMA_CH0_IE_NIE BIT(15) -#define DWCEQOS_DMA_CH0_IE_AIE BIT(14) -#define DWCEQOS_DMA_CH0_IE_RIE BIT(6) -#define DWCEQOS_DMA_CH0_IE_TIE BIT(0) -#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12) -#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7) - -#define DWCEQOS_DMA_IS_DC0IS BIT(0) -#define DWCEQOS_DMA_IS_MTLIS BIT(16) -#define DWCEQOS_DMA_IS_MACIS BIT(17) - -#define DWCEQOS_DMA_CH0_IS_TI BIT(0) -#define DWCEQOS_DMA_CH0_IS_RI BIT(6) -#define DWCEQOS_DMA_CH0_IS_RBU BIT(7) -#define DWCEQOS_DMA_CH0_IS_FBE BIT(12) -#define DWCEQOS_DMA_CH0_IS_CDE BIT(13) -#define DWCEQOS_DMA_CH0_IS_AIS BIT(14) - -#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17) - -#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20) - -/* DMA descriptor bits for RX normal descriptor (read format) */ -#define DWCEQOS_DMA_RDES3_OWN BIT(31) -#define DWCEQOS_DMA_RDES3_INTE BIT(30) -#define DWCEQOS_DMA_RDES3_BUF2V BIT(25) -#define DWCEQOS_DMA_RDES3_BUF1V BIT(24) - -/* DMA descriptor bits for RX normal descriptor (write back format) */ -#define DWCEQOS_DMA_RDES1_IPCE BIT(7) -#define DWCEQOS_DMA_RDES3_ES BIT(15) -#define DWCEQOS_DMA_RDES3_E_JT BIT(14) -#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff) -#define DWCEQOS_DMA_RDES1_PT 0x00000007 -#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0) -#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1) -#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003 - -/* DMA descriptor bits for TX normal descriptor (read format) */ -#define DWCEQOS_DMA_TDES2_IOC BIT(31) -#define DWCEQOS_DMA_TDES3_OWN BIT(31) -#define DWCEQOS_DMA_TDES3_CTXT BIT(30) -#define DWCEQOS_DMA_TDES3_FD BIT(29) -#define DWCEQOS_DMA_TDES3_LD BIT(28) -#define DWCEQOS_DMA_TDES3_CIPH BIT(16) -#define DWCEQOS_DMA_TDES3_CIPP BIT(17) -#define DWCEQOS_DMA_TDES3_CA 0x00030000 -#define DWCEQOS_DMA_TDES3_TSE BIT(18) -#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19) -#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16) - -#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26) - -/* DMA channel states */ -#define DMA_TX_CH_STOPPED 0 -#define DMA_TX_CH_SUSPENDED 6 - -#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12) - -/* MTL */ -#define REG_DWCEQOS_MTL_OPER 0x0c00 -#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c -#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08 -#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38 - -#define REG_DWCEQOS_MTL_IS 0x0c20 -#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00 -#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30 -#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34 -#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c - -#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c - -#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060 - -#define DWCEQOS_MTL_TXQ_TXQEN BIT(3) -#define DWCEQOS_MTL_TXQ_TSF BIT(1) -#define DWCEQOS_MTL_TXQ_FTQ BIT(0) -#define DWCEQOS_MTL_TXQ_TTC512 0x00000070 - -#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8) - -#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12) -#define DWCEQOS_MTL_RXQ_EHFC BIT(7) -#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6) -#define DWCEQOS_MTL_RXQ_FEP BIT(4) -#define DWCEQOS_MTL_RXQ_FUP BIT(3) -#define DWCEQOS_MTL_RXQ_RSF BIT(5) -#define DWCEQOS_MTL_RXQ_RTC32 BIT(0) - -/* MAC */ -#define REG_DWCEQOS_MAC_CFG 0x0000 -#define REG_DWCEQOS_MAC_EXT_CFG 0x0004 
-#define REG_DWCEQOS_MAC_PKT_FILT 0x0008 -#define REG_DWCEQOS_MAC_WD_TO 0x000c -#define REG_DWCEQOS_HASTABLE_LO 0x0010 -#define REG_DWCEQOS_HASTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_IS 0x00b0 -#define REG_DWCEQOS_MAC_IE 0x00b4 -#define REG_DWCEQOS_MAC_STAT 0x00b8 -#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200 -#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204 -#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300 -#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304 -#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0 -#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c -#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120 -#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124 -#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010 -#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0 -#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4 -#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8 -#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc -#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090 -#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070 - -#define DWCEQOS_MAC_CFG_ACS BIT(20) -#define DWCEQOS_MAC_CFG_JD BIT(17) -#define DWCEQOS_MAC_CFG_JE BIT(16) -#define DWCEQOS_MAC_CFG_PS BIT(15) -#define DWCEQOS_MAC_CFG_FES BIT(14) -#define DWCEQOS_MAC_CFG_DM BIT(13) -#define DWCEQOS_MAC_CFG_DO BIT(10) -#define DWCEQOS_MAC_CFG_TE BIT(1) -#define DWCEQOS_MAC_CFG_IPC BIT(27) -#define DWCEQOS_MAC_CFG_RE BIT(0) - -#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8)) -#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8)) - -#define DWCEQOS_MAC_IS_LPI_INT BIT(5) -#define DWCEQOS_MAC_IS_MMC_INT BIT(8) - -#define DWCEQOS_MAC_RXQ_EN BIT(1) -#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31) -#define DWCEQOS_MAC_PKT_FILT_RA BIT(31) -#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10) -#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9) -#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8) -#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5) -#define DWCEQOS_MAC_PKT_FILT_PM BIT(4) -#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3) -#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2) -#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1) -#define DWCEQOS_MAC_PKT_FILT_PR BIT(0) - -#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8) -#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2 -#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3 -#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0 -#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1 -#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4 -#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5 -#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c -#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2) -#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0) - -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21) - -#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0)) - -#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \ - DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \ - DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN) - -#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) - -#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1) -#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16) -#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4) - -/* Features */ -#define 
DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16) -#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14) -#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2) -#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13) -#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1) -#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0) - -#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18) -#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6) -#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f)) - -#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \ - (1 + (((feature1) & 0x1fc0000) >> 18)) - -#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21) -#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16) - -#define DWCEQOS_DMA_MODE_SWR BIT(0) - -#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048 - -/* Mac Management Counters */ -#define REG_DWCEQOS_MMC_CTRL 0x0700 -#define REG_DWCEQOS_MMC_RXIRQ 0x0704 -#define REG_DWCEQOS_MMC_TXIRQ 0x0708 -#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c -#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710 - -#define DWCEQOS_MMC_CTRL_CNTRST BIT(0) -#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2) - -#define DWC_MMC_TXLPITRANSCNTR 0x07F0 -#define DWC_MMC_TXLPIUSCNTR 0x07EC -#define DWC_MMC_TXOVERSIZE_G 0x0778 -#define DWC_MMC_TXVLANPACKETS_G 0x0774 -#define DWC_MMC_TXPAUSEPACKETS 0x0770 -#define DWC_MMC_TXEXCESSDEF 0x076C -#define DWC_MMC_TXPACKETCOUNT_G 0x0768 -#define DWC_MMC_TXOCTETCOUNT_G 0x0764 -#define DWC_MMC_TXCARRIERERROR 0x0760 -#define DWC_MMC_TXEXCESSCOL 0x075C -#define DWC_MMC_TXLATECOL 0x0758 -#define DWC_MMC_TXDEFERRED 0x0754 -#define DWC_MMC_TXMULTICOL_G 0x0750 -#define DWC_MMC_TXSINGLECOL_G 0x074C -#define DWC_MMC_TXUNDERFLOWERROR 0x0748 -#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744 -#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740 -#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C -#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738 -#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734 -#define DWC_MMC_TX256TO511OCTETS_GB 0x0730 -#define DWC_MMC_TX128TO255OCTETS_GB 0x072C -#define DWC_MMC_TX65TO127OCTETS_GB 0x0728 -#define DWC_MMC_TX64OCTETS_GB 0x0724 -#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720 -#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C -#define DWC_MMC_TXPACKETCOUNT_GB 0x0718 -#define DWC_MMC_TXOCTETCOUNT_GB 0x0714 - -#define DWC_MMC_RXLPITRANSCNTR 0x07F8 -#define DWC_MMC_RXLPIUSCNTR 0x07F4 -#define DWC_MMC_RXCTRLPACKETS_G 0x07E4 -#define DWC_MMC_RXRCVERROR 0x07E0 -#define DWC_MMC_RXWATCHDOG 0x07DC -#define DWC_MMC_RXVLANPACKETS_GB 0x07D8 -#define DWC_MMC_RXFIFOOVERFLOW 0x07D4 -#define DWC_MMC_RXPAUSEPACKETS 0x07D0 -#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC -#define DWC_MMC_RXLENGTHERROR 0x07C8 -#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4 -#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0 -#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC -#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8 -#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4 -#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0 -#define DWC_MMC_RX64OCTETS_GB 0x07AC -#define DWC_MMC_RXOVERSIZE_G 0x07A8 -#define DWC_MMC_RXUNDERSIZE_G 0x07A4 -#define DWC_MMC_RXJABBERERROR 0x07A0 -#define DWC_MMC_RXRUNTERROR 0x079C -#define DWC_MMC_RXALIGNMENTERROR 0x0798 -#define DWC_MMC_RXCRCERROR 0x0794 -#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790 -#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C -#define DWC_MMC_RXOCTETCOUNT_G 0x0788 -#define DWC_MMC_RXOCTETCOUNT_GB 0x0784 -#define DWC_MMC_RXPACKETCOUNT_GB 0x0780 - -static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); - -/* DMA ring descriptor. 
These are used as support descriptors for the HW DMA */ -struct ring_desc { - struct sk_buff *skb; - dma_addr_t mapping; - size_t len; -}; - -/* DMA hardware descriptor */ -struct dwceqos_dma_desc { - u32 des0; - u32 des1; - u32 des2; - u32 des3; -} ____cacheline_aligned; - -struct dwceqos_mmc_counters { - __u64 txlpitranscntr; - __u64 txpiuscntr; - __u64 txoversize_g; - __u64 txvlanpackets_g; - __u64 txpausepackets; - __u64 txexcessdef; - __u64 txpacketcount_g; - __u64 txoctetcount_g; - __u64 txcarriererror; - __u64 txexcesscol; - __u64 txlatecol; - __u64 txdeferred; - __u64 txmulticol_g; - __u64 txsinglecol_g; - __u64 txunderflowerror; - __u64 txbroadcastpackets_gb; - __u64 txmulticastpackets_gb; - __u64 txunicastpackets_gb; - __u64 tx1024tomaxoctets_gb; - __u64 tx512to1023octets_gb; - __u64 tx256to511octets_gb; - __u64 tx128to255octets_gb; - __u64 tx65to127octets_gb; - __u64 tx64octets_gb; - __u64 txmulticastpackets_g; - __u64 txbroadcastpackets_g; - __u64 txpacketcount_gb; - __u64 txoctetcount_gb; - - __u64 rxlpitranscntr; - __u64 rxlpiuscntr; - __u64 rxctrlpackets_g; - __u64 rxrcverror; - __u64 rxwatchdog; - __u64 rxvlanpackets_gb; - __u64 rxfifooverflow; - __u64 rxpausepackets; - __u64 rxoutofrangetype; - __u64 rxlengtherror; - __u64 rxunicastpackets_g; - __u64 rx1024tomaxoctets_gb; - __u64 rx512to1023octets_gb; - __u64 rx256to511octets_gb; - __u64 rx128to255octets_gb; - __u64 rx65to127octets_gb; - __u64 rx64octets_gb; - __u64 rxoversize_g; - __u64 rxundersize_g; - __u64 rxjabbererror; - __u64 rxrunterror; - __u64 rxalignmenterror; - __u64 rxcrcerror; - __u64 rxmulticastpackets_g; - __u64 rxbroadcastpackets_g; - __u64 rxoctetcount_g; - __u64 rxoctetcount_gb; - __u64 rxpacketcount_gb; -}; - -/* Ethtool statistics */ - -struct dwceqos_stat { - const char stat_name[ETH_GSTRING_LEN]; - int offset; -}; - -#define STAT_ITEM(name, var) \ - {\ - name,\ - offsetof(struct dwceqos_mmc_counters, var),\ - } - -static const struct dwceqos_stat dwceqos_ethtool_stats[] = { - STAT_ITEM("tx_bytes", txoctetcount_gb), - STAT_ITEM("tx_packets", txpacketcount_gb), - STAT_ITEM("tx_unicst_packets", txunicastpackets_gb), - STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb), - STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb), - STAT_ITEM("tx_pause_packets", txpausepackets), - STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb), - STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb), - STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb), - STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb), - STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb), - STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb), - STAT_ITEM("tx_underflow_errors", txunderflowerror), - STAT_ITEM("tx_lpi_count", txlpitranscntr), - - STAT_ITEM("rx_bytes", rxoctetcount_gb), - STAT_ITEM("rx_packets", rxpacketcount_gb), - STAT_ITEM("rx_unicast_packets", rxunicastpackets_g), - STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g), - STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g), - STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb), - STAT_ITEM("rx_pause_packets", rxpausepackets), - STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb), - STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb), - STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb), - STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb), - STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb), - STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb), - 
STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow), - STAT_ITEM("rx_oversize_packets", rxoversize_g), - STAT_ITEM("rx_undersize_packets", rxundersize_g), - STAT_ITEM("rx_jabbers", rxjabbererror), - STAT_ITEM("rx_align_errors", rxalignmenterror), - STAT_ITEM("rx_crc_errors", rxcrcerror), - STAT_ITEM("rx_lpi_count", rxlpitranscntr), -}; - -/* Configuration of AXI bus parameters. - * These values depend on the parameters set on the MAC core as well - * as the AXI interconnect. - */ -struct dwceqos_bus_cfg { - /* Enable AXI low-power interface. */ - bool en_lpi; - /* Limit on number of outstanding AXI write requests. */ - u32 write_requests; - /* Limit on number of outstanding AXI read requests. */ - u32 read_requests; - /* Bitmap of allowed AXI burst lengths, 4-256 beats. */ - u32 burst_map; - /* DMA Programmable burst length*/ - u32 tx_pbl; - u32 rx_pbl; -}; - -struct dwceqos_flowcontrol { - int autoneg; - int rx; - int rx_current; - int tx; - int tx_current; -}; - -struct net_local { - void __iomem *baseaddr; - struct clk *phy_ref_clk; - struct clk *apb_pclk; - - struct device_node *phy_node; - struct net_device *ndev; - struct platform_device *pdev; - - u32 msg_enable; - - struct tasklet_struct tx_bdreclaim_tasklet; - struct workqueue_struct *txtimeout_handler_wq; - struct work_struct txtimeout_reinit; - - phy_interface_t phy_interface; - struct mii_bus *mii_bus; - - unsigned int link; - unsigned int speed; - unsigned int duplex; - - struct napi_struct napi; - - /* DMA Descriptor Areas */ - struct ring_desc *rx_skb; - struct ring_desc *tx_skb; - - struct dwceqos_dma_desc *tx_descs; - struct dwceqos_dma_desc *rx_descs; - - /* DMA Mapped Descriptor areas*/ - dma_addr_t tx_descs_addr; - dma_addr_t rx_descs_addr; - dma_addr_t tx_descs_tail_addr; - dma_addr_t rx_descs_tail_addr; - - size_t tx_free; - size_t tx_next; - size_t rx_cur; - size_t tx_cur; - - /* Spinlocks for accessing DMA Descriptors */ - spinlock_t tx_lock; - - /* Spinlock for register read-modify-writes. */ - spinlock_t hw_lock; - - u32 feature0; - u32 feature1; - u32 feature2; - - struct dwceqos_bus_cfg bus_cfg; - bool en_tx_lpi_clockgating; - - int eee_enabled; - int eee_active; - int csr_val; - u32 gso_size; - - struct dwceqos_mmc_counters mmc_counters; - /* Protect the mmc_counter updates. */ - spinlock_t stats_lock; - u32 mmc_rx_counters_mask; - u32 mmc_tx_counters_mask; - - struct dwceqos_flowcontrol flowcontrol; - - /* Tracks the intermediate state of phy started but hardware - * init not finished yet. 
- */ - bool phy_defer; -}; - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask); - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n); -static int dwceqos_stop(struct net_device *ndev); -static int dwceqos_open(struct net_device *ndev); -static void dwceqos_tx_poll_demand(struct net_local *lp); - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable); -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable); - -static void dwceqos_reset_state(struct net_local *lp); - -#define dwceqos_read(lp, reg) \ - readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg)) -#define dwceqos_write(lp, reg, val) \ - writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg)) - -static void dwceqos_reset_state(struct net_local *lp) -{ - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.rx_current = 0; - lp->flowcontrol.tx_current = 0; - lp->eee_active = 0; - lp->eee_enabled = 0; -} - -static void print_descriptor(struct net_local *lp, int index, int tx) -{ - struct dwceqos_dma_desc *dd; - - if (tx) - dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index]; - else - dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index]; - - pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX", - index, dd); - pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2, - dd->des3); -} - -static void print_status(struct net_local *lp) -{ - size_t desci, i; - - pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free, - lp->tx_cur, lp->tx_next); - - print_descriptor(lp, lp->rx_cur, 0); - - for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0; - i < DWCEQOS_TX_DCNT; - ++i) { - print_descriptor(lp, desci, 1); - desci = (desci + 1) % DWCEQOS_TX_DCNT; - } - - pr_info("DMA_Debug_Status0: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0)); - pr_info("DMA_CH0_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_IS)); - pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n", - dwceqos_read(lp, 0x1144)); - pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n", - dwceqos_read(lp, 0x1154)); - pr_info("MTL_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST)); - pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST)); - pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST)); - pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC), - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC)); -} - -static void dwceqos_mdio_set_csr(struct net_local *lp) -{ - int rate = clk_get_rate(lp->apb_pclk); - - if (rate <= 20000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20; - else if (rate <= 35000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35; - else if (rate <= 60000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60; - else if (rate <= 100000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100; - else if (rate <= 150000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150; - else if (rate <= 250000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250; -} - -/* Simple MDIO functions implementing mii_bus */ -static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - int data; - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_READ; - dwceqos_write(lp, 
REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - - data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA); - if (i == 5) { - netdev_warn(lp->ndev, "MDIO read timed out\n"); - data = 0xffff; - } - - return data & 0xffff; -} - -static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, - u16 value) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value); - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE; - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - if (i == 5) - netdev_warn(lp->ndev, "MDIO write timed out\n"); - return 0; -} - -static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return phy_mii_ioctl(phydev, rq, cmd); - default: - dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd); - return -EOPNOTSUPP; - } -} - -static void dwceqos_link_down(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - /* Indicate link down to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_link_up(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - unsigned long flags; - - /* Indicate link up to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - - lp->eee_active = !phy_init_eee(ndev->phydev, 0); - - /* Check for changed EEE capability */ - if (!lp->eee_active && lp->eee_enabled) { - lp->eee_enabled = 0; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } -} - -static void dwceqos_set_speed(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - struct phy_device *phydev = ndev->phydev; - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES | - DWCEQOS_MAC_CFG_DM); - - if (phydev->duplex) - regval |= DWCEQOS_MAC_CFG_DM; - if (phydev->speed == SPEED_10) { - regval |= DWCEQOS_MAC_CFG_PS; - } else if (phydev->speed == SPEED_100) { - regval |= DWCEQOS_MAC_CFG_PS | - DWCEQOS_MAC_CFG_FES; - } else if (phydev->speed != SPEED_1000) { - netdev_err(lp->ndev, - "unknown PHY speed %d\n", - phydev->speed); - return; - } - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval); -} - -static void dwceqos_adjust_link(struct net_device *ndev) -{ - 
struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - int status_change = 0; - - if (lp->phy_defer) - return; - - if (phydev->link) { - if ((lp->speed != phydev->speed) || - (lp->duplex != phydev->duplex)) { - dwceqos_set_speed(lp); - - lp->speed = phydev->speed; - lp->duplex = phydev->duplex; - status_change = 1; - } - - if (lp->flowcontrol.autoneg) { - lp->flowcontrol.rx = phydev->pause || - phydev->asym_pause; - lp->flowcontrol.tx = phydev->pause || - phydev->asym_pause; - } - - if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set rx flow to %d\n", - lp->flowcontrol.rx); - dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx); - lp->flowcontrol.rx_current = lp->flowcontrol.rx; - } - if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set tx flow to %d\n", - lp->flowcontrol.tx); - dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx); - lp->flowcontrol.tx_current = lp->flowcontrol.tx; - } - } - - if (phydev->link != lp->link) { - lp->link = phydev->link; - status_change = 1; - } - - if (status_change) { - if (phydev->link) { - netif_trans_update(lp->ndev); - dwceqos_link_up(lp); - } else { - dwceqos_link_down(lp); - } - phy_print_status(phydev); - } -} - -static int dwceqos_mii_probe(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = NULL; - - if (lp->phy_node) { - phydev = of_phy_connect(lp->ndev, - lp->phy_node, - &dwceqos_adjust_link, - 0, - lp->phy_interface); - - if (!phydev) { - netdev_err(ndev, "no PHY found\n"); - return -1; - } - } else { - netdev_err(ndev, "no PHY configured\n"); - return -ENODEV; - } - - if (netif_msg_probe(lp)) - phy_attached_info(phydev); - - phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.autoneg = AUTONEG_ENABLE; - - return 0; -} - -static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index) -{ - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index); - goto err_out; - } - - new_skb_baddr = dma_map_single(lp->ndev->dev.parent, - new_skb->data, DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - new_skb = NULL; - goto err_out; - } - - lp->rx_descs[index].des0 = new_skb_baddr; - lp->rx_descs[index].des1 = 0; - lp->rx_descs[index].des2 = 0; - lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_BUF1V | - DWCEQOS_DMA_RDES3_OWN; - - lp->rx_skb[index].mapping = new_skb_baddr; - lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE; - -err_out: - lp->rx_skb[index].skb = new_skb; -} - -static void dwceqos_clean_rings(struct net_local *lp) -{ - int i; - - if (lp->rx_skb) { - for (i = 0; i < DWCEQOS_RX_DCNT; i++) { - if (lp->rx_skb[i].skb) { - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[i].mapping, - lp->rx_skb[i].len, - DMA_FROM_DEVICE); - - dev_kfree_skb(lp->rx_skb[i].skb); - lp->rx_skb[i].skb = NULL; - lp->rx_skb[i].mapping = 0; - } - } - } - - if (lp->tx_skb) { - for (i = 0; i < DWCEQOS_TX_DCNT; i++) { - if (lp->tx_skb[i].skb) { - dev_kfree_skb(lp->tx_skb[i].skb); - lp->tx_skb[i].skb = NULL; - } - if (lp->tx_skb[i].mapping) { - dma_unmap_single(lp->ndev->dev.parent, - 
lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - lp->tx_skb[i].mapping = 0; - } - } - } -} - -static void dwceqos_descriptor_free(struct net_local *lp) -{ - int size; - - dwceqos_clean_rings(lp); - - kfree(lp->tx_skb); - lp->tx_skb = NULL; - kfree(lp->rx_skb); - lp->rx_skb = NULL; - - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->rx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->rx_descs), lp->rx_descs_addr); - lp->rx_descs = NULL; - } - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->tx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->tx_descs), lp->tx_descs_addr); - lp->tx_descs = NULL; - } -} - -static int dwceqos_descriptor_init(struct net_local *lp) -{ - int size; - u32 i; - - lp->gso_size = 0; - - lp->tx_skb = NULL; - lp->rx_skb = NULL; - lp->rx_descs = NULL; - lp->tx_descs = NULL; - - /* Reset the DMA indexes */ - lp->rx_cur = 0; - lp->tx_cur = 0; - lp->tx_next = 0; - lp->tx_free = DWCEQOS_TX_DCNT; - - /* Allocate Ring descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc); - lp->rx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->rx_skb) - goto err_out; - - size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc); - lp->tx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->tx_skb) - goto err_out; - - /* Allocate DMA descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->rx_descs_addr, GFP_KERNEL); - if (!lp->rx_descs) - goto err_out; - lp->rx_descs_tail_addr = lp->rx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT; - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->tx_descs_addr, GFP_KERNEL); - if (!lp->tx_descs) - goto err_out; - lp->tx_descs_tail_addr = lp->tx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT; - - /* Initialize RX Ring Descriptors and buffers */ - for (i = 0; i < DWCEQOS_RX_DCNT; ++i) { - dwceqos_alloc_rxring_desc(lp, i); - if (!(lp->rx_skb[lp->rx_cur].skb)) - goto err_out; - } - - /* Initialize TX Descriptors */ - for (i = 0; i < DWCEQOS_TX_DCNT; ++i) { - lp->tx_descs[i].des0 = 0; - lp->tx_descs[i].des1 = 0; - lp->tx_descs[i].des2 = 0; - lp->tx_descs[i].des3 = 0; - } - - /* Make descriptor writes visible to the DMA. 
*/ - wmb(); - - return 0; - -err_out: - dwceqos_descriptor_free(lp); - return -ENOMEM; -} - -static int dwceqos_packet_avail(struct net_local *lp) -{ - return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN); -} - -static void dwceqos_get_hwfeatures(struct net_local *lp) -{ - lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0); - lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1); - lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2); -} - -static void dwceqos_dma_enable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_enable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_enable_mmc_interrupt(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0); -} - -static int dwceqos_mii_init(struct net_local *lp) -{ - int ret = -ENXIO; - struct resource res; - struct device_node *mdionode; - - mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio"); - - if (!mdionode) - return 0; - - lp->mii_bus = mdiobus_alloc(); - if (!lp->mii_bus) { - ret = -ENOMEM; - goto err_out; - } - - lp->mii_bus->name = "DWCEQOS MII bus"; - lp->mii_bus->read = &dwceqos_mdio_read; - lp->mii_bus->write = &dwceqos_mdio_write; - lp->mii_bus->priv = lp; - lp->mii_bus->parent = &lp->pdev->dev; - - of_address_to_resource(lp->pdev->dev.of_node, 0, &res); - snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", - (unsigned long long)res.start); - if (of_mdiobus_register(lp->mii_bus, mdionode)) - goto err_out_free_mdiobus; - - return 0; - -err_out_free_mdiobus: - mdiobus_free(lp->mii_bus); -err_out: - of_node_put(mdionode); - return ret; -} - -/* DMA reset. When issued also resets all MTL and MAC registers as well */ -static void dwceqos_reset_hw(struct net_local *lp) -{ - /* Wait (at most) 0.5 seconds for DMA reset*/ - int i = 5000; - u32 reg; - - /* Force gigabit to guarantee a TX clock for GMII. 
*/ - reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES); - reg |= DWCEQOS_MAC_CFG_DM; - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR); - - do { - udelay(100); - i--; - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE); - } while ((reg & DWCEQOS_DMA_MODE_SWR) && i); - /* We might experience a timeout if the chip clock mux is broken */ - if (!i) - netdev_err(lp->ndev, "DMA reset timed out!\n"); -} - -static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status) -{ - if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) { - netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } - if (dma_status & DWCEQOS_DMA_CH0_IS_REB) { - netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } -} - -static void dwceqos_mmc_interrupt(struct net_local *lp) -{ - unsigned long flags; - - spin_lock_irqsave(&lp->stats_lock, flags); - - /* A latched mmc interrupt can not be masked, we must read - * all the counters with an interrupt pending. - */ - dwceqos_read_mmc_counters(lp, - dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ), - dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ)); - - spin_unlock_irqrestore(&lp->stats_lock, flags); -} - -static void dwceqos_mac_interrupt(struct net_local *lp) -{ - u32 cause; - - cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS); - - if (cause & DWCEQOS_MAC_IS_MMC_INT) - dwceqos_mmc_interrupt(lp); -} - -static irqreturn_t dwceqos_interrupt(int irq, void *dev_id) -{ - struct net_device *ndev = dev_id; - struct net_local *lp = netdev_priv(ndev); - - u32 cause; - u32 dma_status; - irqreturn_t ret = IRQ_NONE; - - cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS); - /* DMA Channel 0 Interrupt */ - if (cause & DWCEQOS_DMA_IS_DC0IS) { - dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA); - - /* Transmit Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_TI) { - tasklet_schedule(&lp->tx_bdreclaim_tasklet); - dwceqos_dma_disable_txirq(lp); - } - - /* Receive Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_RI) { - /* Disable RX IRQs */ - dwceqos_dma_disable_rxirq(lp); - napi_schedule(&lp->napi); - } - - /* Fatal Bus Error interrupt */ - if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) { - dwceqos_fatal_bus_error(lp, dma_status); - - /* errata 9000831707 */ - dma_status |= DWCEQOS_DMA_CH0_IS_TEB | - DWCEQOS_DMA_CH0_IS_REB; - } - - /* Ack all DMA Channel 0 IRQs */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MTLIS) { - u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL); - - dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MACIS) { - dwceqos_mac_interrupt(lp); - ret = IRQ_HANDLED; - } - return ret; -} - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL); - if (enable) - regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - else - regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval); - - 
spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - /* MTL flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - if (enable) - regval |= DWCEQOS_MTL_RXQ_EHFC; - else - regval &= ~DWCEQOS_MTL_RXQ_EHFC; - - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - /* MAC flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW); - if (enable) - regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE; - else - regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_flow_control(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - int RQS, RFD, RFA; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - - /* The queue size is in units of 256 bytes. We want 512 bytes units for - * the threshold fields. - */ - RQS = ((regval >> 20) & 0x3FF) + 1; - RQS /= 2; - - /* The thresholds are relative to a full queue, with a bias - * of 1 KiByte below full. - */ - RFD = RQS / 2 - 2; - RFA = RQS / 8 - 2; - - regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8); - - if (RFD >= 0 && RFA >= 0) { - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - } else { - netdev_warn(lp->ndev, - "FIFO too small for flow control."); - } - - regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) | - DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS; - - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_clock(struct net_local *lp) -{ - unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000; - - BUG_ON(!rate_mhz); - - dwceqos_write(lp, - REG_DWCEQOS_MAC_1US_TIC_COUNTER, - DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1)); -} - -static void dwceqos_configure_bus(struct net_local *lp) -{ - u32 sysbus_reg; - - /* N.B. We do not support the Fixed Burst mode because it - * opens a race window by making HW access to DMA descriptors - * non-atomic. - */ - - sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL; - - if (lp->bus_cfg.en_lpi) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI; - - if (lp->bus_cfg.burst_map) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - lp->bus_cfg.burst_map); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT); - - if (lp->bus_cfg.read_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - lp->bus_cfg.read_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT); - - if (lp->bus_cfg.write_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - lp->bus_cfg.write_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT); - - if (netif_msg_hw(lp)) - netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg); -} - -static void dwceqos_init_hw(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - u32 buswidth; - u32 dma_skip; - - /* Software reset */ - dwceqos_reset_hw(lp); - - dwceqos_configure_bus(lp); - - /* Probe data bus width, 32/64/128 bits. 
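The bus-width probe mentioned in the comment above works because the DMA tail-pointer register cannot hold address bits below the native bus width: write 0xF, read it back, and (readback ^ 0xF) + 1 recovers the width in bytes, which then sizes the inter-descriptor skip. A standalone sketch of that arithmetic follows; the register behaviour is modelled in plain C and the 64-byte padded descriptor size is an assumption for illustration, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Model of the hardware: the tail-pointer register drops address bits
 * below the data-bus width, so a write of 0xF reads back with those
 * low bits cleared.
 */
static uint32_t tail_reg_readback(uint32_t bus_width_bytes)
{
	return 0xF & ~(bus_width_bytes - 1);
}

int main(void)
{
	const uint32_t padded_desc_size = 64;	/* assumed cache-line padded size */

	for (uint32_t width = 4; width <= 16; width *= 2) {
		uint32_t readback = tail_reg_readback(width);
		uint32_t probed = (readback ^ 0xF) + 1;	/* 4, 8 or 16 bytes */
		/* Skip enough bus-width words after the 16 descriptor bytes
		 * so each descriptor starts on its own padded slot.
		 */
		uint32_t dma_skip = (padded_desc_size - 16) / probed;

		printf("bus %2u bytes: readback %#x, probed %2u, skip %u\n",
		       width, readback, probed, dma_skip);
	}
	return 0;
}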
*/ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL); - buswidth = (regval ^ 0xF) + 1; - - /* Cache-align dma descriptors. */ - dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL, - DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) | - DWCEQOS_DMA_CH_CTRL_PBLX8); - - /* Initialize DMA Channel 0 */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST, - (u32)lp->tx_descs_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST, - (u32)lp->rx_descs_addr); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - if (lp->bus_cfg.tx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - /* Enable TSO if the HW support it */ - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - regval |= DWCEQOS_DMA_CH_TX_TSE; - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval); - - if (lp->bus_cfg.rx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - regval |= DWCEQOS_DMA_CH_CTRL_START; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - /* Initialize MTL Queues */ - regval = DWCEQOS_MTL_SCHALG_STRICT; - dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval); - - regval = DWCEQOS_MTL_TXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF | - DWCEQOS_MTL_TXQ_TTC512; - dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval); - - regval = DWCEQOS_MTL_RXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF; - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - dwceqos_configure_flow_control(lp); - - /* Initialize MAC */ - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - lp->eee_enabled = 0; - - dwceqos_configure_clock(lp); - - /* MMC counters */ - - /* probe implemented counters */ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u); - lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK); - lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK); - - dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST | - DWCEQOS_MMC_CTRL_RSTONRD); - dwceqos_enable_mmc_interrupt(lp); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); - dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | - DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - /* Start TX DMA */ - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, - regval | DWCEQOS_DMA_CH_CTRL_START); - - /* Enable MAC TX/RX */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, - regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - lp->phy_defer = false; - mutex_lock(&ndev->phydev->lock); - phy_read_status(ndev->phydev); - dwceqos_adjust_link(lp->ndev); - mutex_unlock(&ndev->phydev->lock); -} - -static void dwceqos_tx_reclaim(unsigned long data) -{ - struct net_device *ndev = 
(struct net_device *)data; - struct net_local *lp = netdev_priv(ndev); - unsigned int tx_bytes = 0; - unsigned int tx_packets = 0; - - spin_lock(&lp->tx_lock); - - while (lp->tx_free < DWCEQOS_TX_DCNT) { - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur]; - struct ring_desc *rd = &lp->tx_skb[lp->tx_cur]; - - /* Descriptor still being held by DMA ? */ - if (dd->des3 & DWCEQOS_DMA_TDES3_OWN) - break; - - if (rd->mapping) - dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len, - DMA_TO_DEVICE); - - if (unlikely(rd->skb)) { - ++tx_packets; - tx_bytes += rd->skb->len; - dev_consume_skb_any(rd->skb); - } - - rd->skb = NULL; - rd->mapping = 0; - lp->tx_free++; - lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT; - - if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) && - (dd->des3 & DWCEQOS_DMA_RDES3_ES)) { - if (netif_msg_tx_err(lp)) - netdev_err(ndev, "TX Error, TDES3 = 0x%x\n", - dd->des3); - if (netif_msg_hw(lp)) - print_status(lp); - } - } - spin_unlock(&lp->tx_lock); - - netdev_completed_queue(ndev, tx_packets, tx_bytes); - - dwceqos_dma_enable_txirq(lp); - netif_wake_queue(ndev); -} - -static int dwceqos_rx(struct net_local *lp, int budget) -{ - struct sk_buff *skb; - u32 tot_size = 0; - unsigned int n_packets = 0; - unsigned int n_descs = 0; - u32 len; - - struct dwceqos_dma_desc *dd; - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - while (n_descs < budget) { - if (!dwceqos_packet_avail(lp)) - break; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "no memory for new sk_buff\n"); - break; - } - - /* Get dma handle of skb->data */ - new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent, - new_skb->data, - DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - break; - } - - /* Read descriptor data after reading owner bit. */ - dma_rmb(); - - dd = &lp->rx_descs[lp->rx_cur]; - len = DWCEQOS_DMA_RDES3_PL(dd->des3); - skb = lp->rx_skb[lp->rx_cur].skb; - - /* Unmap old buffer */ - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[lp->rx_cur].mapping, - lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE); - - /* Discard packet on reception error or bad checksum */ - if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) || - (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) { - dev_kfree_skb(skb); - skb = NULL; - } else { - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, lp->ndev); - switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) { - case DWCEQOS_DMA_RDES1_PT_UDP: - case DWCEQOS_DMA_RDES1_PT_TCP: - case DWCEQOS_DMA_RDES1_PT_ICMP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - skb->ip_summed = CHECKSUM_NONE; - break; - } - } - - if (unlikely(!skb)) { - if (netif_msg_rx_err(lp)) - netdev_dbg(lp->ndev, "rx error: des3=%X\n", - lp->rx_descs[lp->rx_cur].des3); - } else { - tot_size += skb->len; - n_packets++; - - netif_receive_skb(skb); - } - - lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr; - lp->rx_descs[lp->rx_cur].des1 = 0; - lp->rx_descs[lp->rx_cur].des2 = 0; - /* The DMA must observe des0/1/2 written before des3. 
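The ordering rule stated in the comment above is the usual producer/consumer handoff for DMA descriptors: populate every field, issue a write barrier, and only then set the OWN bit so the device can never sample a half-written descriptor. A reduced sketch of that pattern is below; the bit positions and the barrier helper are illustrative stand-ins, not the driver's definitions.

#include <stdint.h>

struct rx_desc {
	uint32_t des0, des1, des2, des3;
};

/* Illustrative bit assignments, not the hardware's actual layout. */
#define RDES3_OWN	(1u << 31)
#define RDES3_INTE	(1u << 30)
#define RDES3_BUF1V	(1u << 24)

/* Stand-in for the kernel's wmb(): order earlier stores before later ones. */
static inline void write_barrier(void)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
}

/* Hand a refilled RX descriptor back to the DMA engine. */
static void refill_rx_desc(volatile struct rx_desc *d, uint32_t buf_dma_addr)
{
	d->des0 = buf_dma_addr;	/* new buffer address */
	d->des1 = 0;
	d->des2 = 0;

	/* The device must not observe OWN until des0..des2 are visible. */
	write_barrier();

	d->des3 = RDES3_OWN | RDES3_INTE | RDES3_BUF1V;
}

The same reasoning is why a second barrier precedes the tail-pointer doorbell further down: the ownership updates must be visible before the DMA is told to look at the ring again.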
*/ - wmb(); - lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_OWN | - DWCEQOS_DMA_RDES3_BUF1V; - - lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr; - lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE; - lp->rx_skb[lp->rx_cur].skb = new_skb; - - n_descs++; - lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT; - } - - /* Make sure any ownership update is written to the descriptors before - * DMA wakeup. - */ - wmb(); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI); - /* Wake up RX by writing tail pointer */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - return n_descs; -} - -static int dwceqos_rx_poll(struct napi_struct *napi, int budget) -{ - struct net_local *lp = container_of(napi, struct net_local, napi); - int work_done = 0; - - work_done = dwceqos_rx(lp, budget - work_done); - - if (!dwceqos_packet_avail(lp) && work_done < budget) { - napi_complete(napi); - dwceqos_dma_enable_rxirq(lp); - } else { - work_done = budget; - } - - return work_done; -} - -/* Reinitialize function if a TX timed out */ -static void dwceqos_reinit_for_txtimeout(struct work_struct *data) -{ - struct net_local *lp = container_of(data, struct net_local, - txtimeout_reinit); - - netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n", - DWCEQOS_TX_TIMEOUT); - - if (netif_msg_hw(lp)) - print_status(lp); - - rtnl_lock(); - dwceqos_stop(lp->ndev); - dwceqos_open(lp->ndev); - rtnl_unlock(); -} - -/* DT Probing function called by main probe */ -static inline int dwceqos_probe_config_dt(struct platform_device *pdev) -{ - struct net_device *ndev; - struct net_local *lp; - const void *mac_address; - struct dwceqos_bus_cfg *bus_cfg; - struct device_node *np = pdev->dev.of_node; - - ndev = platform_get_drvdata(pdev); - lp = netdev_priv(ndev); - bus_cfg = &lp->bus_cfg; - - /* Set the MAC address. */ - mac_address = of_get_mac_address(pdev->dev.of_node); - if (mac_address) - ether_addr_copy(ndev->dev_addr, mac_address); - - /* These are all optional parameters */ - lp->en_tx_lpi_clockgating = of_property_read_bool(np, - "snps,en-tx-lpi-clockgating"); - bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi"); - of_property_read_u32(np, "snps,write-requests", - &bus_cfg->write_requests); - of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests); - of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map); - of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl); - of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl); - - netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n", - bus_cfg->en_lpi, - bus_cfg->write_requests, - bus_cfg->read_requests, - bus_cfg->burst_map, - bus_cfg->rx_pbl, - bus_cfg->tx_pbl); - - return 0; -} - -static int dwceqos_open(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - int res; - - dwceqos_reset_state(lp); - res = dwceqos_descriptor_init(lp); - if (res) { - netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res); - return res; - } - netdev_reset_queue(ndev); - - /* The dwceqos reset state machine requires all phy clocks to complete, - * hence the unusual init order with phy_start first. - */ - lp->phy_defer = true; - phy_start(ndev->phydev); - dwceqos_init_hw(lp); - napi_enable(&lp->napi); - - netif_start_queue(ndev); - tasklet_enable(&lp->tx_bdreclaim_tasklet); - - /* Enable Interrupts -- do this only after we enable NAPI and the - * tasklet. 
- */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, - DWCEQOS_DMA_CH0_IE_NIE | - DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | - DWCEQOS_DMA_CH0_IE_AIE | - DWCEQOS_DMA_CH0_IE_FBEE); - - return 0; -} - -static bool dweqos_is_tx_dma_suspended(struct net_local *lp) -{ - u32 reg; - - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0); - reg = DMA_GET_TX_STATE_CH0(reg); - - return reg == DMA_TX_CH_SUSPENDED; -} - -static void dwceqos_drain_dma(struct net_local *lp) -{ - /* Wait for all pending TX buffers to be sent. Upper limit based - * on max frame size on a 10 Mbit link. - */ - size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100; - - while (!dweqos_is_tx_dma_suspended(lp) && limit--) - usleep_range(100, 200); -} - -static int dwceqos_stop(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - tasklet_disable(&lp->tx_bdreclaim_tasklet); - napi_disable(&lp->napi); - - /* Stop all tx before we drain the tx dma. */ - netif_tx_lock_bh(lp->ndev); - netif_stop_queue(ndev); - netif_tx_unlock_bh(lp->ndev); - - dwceqos_drain_dma(lp); - dwceqos_reset_hw(lp); - phy_stop(ndev->phydev); - - dwceqos_descriptor_free(lp); - - return 0; -} - -static void dwceqos_dmadesc_set_ctx(struct net_local *lp, - unsigned short gso_size) -{ - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next]; - - dd->des0 = 0; - dd->des1 = 0; - dd->des2 = gso_size; - dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV; - - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; -} - -static void dwceqos_tx_poll_demand(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); -} - -struct dwceqos_tx { - size_t nr_descriptors; - size_t initial_descriptor; - size_t last_descriptor; - size_t prev_gso_size; - size_t network_header_len; -}; - -static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - size_t n = 1; - size_t i; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) - ++n; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) / - BYTES_PER_DMA_DESC; - } - - tx->nr_descriptors = n; - tx->initial_descriptor = lp->tx_next; - tx->last_descriptor = lp->tx_next; - tx->prev_gso_size = lp->gso_size; - - tx->network_header_len = skb_transport_offset(skb); - if (skb_is_gso(skb)) - tx->network_header_len += tcp_hdrlen(skb); -} - -static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd; - struct dwceqos_dma_desc *dd; - size_t payload_len; - dma_addr_t dma_handle; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) { - dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size); - lp->gso_size = skb_shinfo(skb)->gso_size; - } - - dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "TX DMA Mapping error\n"); - return -ENOMEM; - } - - rd = &lp->tx_skb[lp->tx_next]; - dd = &lp->tx_descs[lp->tx_next]; - - rd->skb = NULL; - rd->len = skb_headlen(skb); - rd->mapping = dma_handle; - - /* Set up DMA Descriptor */ - dd->des0 = dma_handle; - - if (skb_is_gso(skb)) { - payload_len = skb_headlen(skb) - tx->network_header_len; - - if (payload_len) - dd->des1 = dma_handle + tx->network_header_len; - dd->des2 = tx->network_header_len | - DWCEQOS_DMA_DES2_B2L(payload_len); - dd->des3 = DWCEQOS_DMA_TDES3_TSE | - 
DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) | - (skb->len - tx->network_header_len); - } else { - dd->des1 = 0; - dd->des2 = skb_headlen(skb); - dd->des3 = skb->len; - - switch (skb->ip_summed) { - case CHECKSUM_PARTIAL: - dd->des3 |= DWCEQOS_DMA_TDES3_CA; - case CHECKSUM_NONE: - case CHECKSUM_UNNECESSARY: - case CHECKSUM_COMPLETE: - default: - break; - } - } - - dd->des3 |= DWCEQOS_DMA_TDES3_FD; - if (lp->tx_next != tx->initial_descriptor) - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - - return 0; -} - -static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd = NULL; - struct dwceqos_dma_desc *dd; - dma_addr_t dma_handle; - size_t i; - - /* Setup more ring and DMA descriptor if the packet is fragmented */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - size_t frag_size; - size_t consumed_size; - - /* Map DMA Area */ - dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "DMA Mapping error\n"); - return -ENOMEM; - } - - /* order-3 fragments span more than one descriptor. */ - frag_size = skb_frag_size(frag); - consumed_size = 0; - while (consumed_size < frag_size) { - size_t dma_size = min_t(size_t, 16376, - frag_size - consumed_size); - - rd = &lp->tx_skb[lp->tx_next]; - memset(rd, 0, sizeof(*rd)); - - dd = &lp->tx_descs[lp->tx_next]; - - /* Set DMA Descriptor fields */ - dd->des0 = dma_handle + consumed_size; - dd->des1 = 0; - dd->des2 = dma_size; - - if (skb_is_gso(skb)) - dd->des3 = (skb->len - tx->network_header_len); - else - dd->des3 = skb->len; - - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - consumed_size += dma_size; - } - - rd->len = skb_frag_size(frag); - rd->mapping = dma_handle; - } - - return 0; -} - -static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD; - lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC; - - lp->tx_skb[tx->last_descriptor].skb = skb; - - /* Make all descriptor updates visible to the DMA before setting the - * owner bit. - */ - wmb(); - - lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN; - - /* Make the owner bit visible before TX wakeup. 
*/ - wmb(); - - dwceqos_tx_poll_demand(lp); -} - -static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx) -{ - size_t i = tx->initial_descriptor; - - while (i != lp->tx_next) { - if (lp->tx_skb[i].mapping) - dma_unmap_single(lp->ndev->dev.parent, - lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - - lp->tx_skb[i].mapping = 0; - lp->tx_skb[i].skb = NULL; - - memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i])); - - i = (i + 1) % DWCEQOS_TX_DCNT; - } - - lp->tx_next = tx->initial_descriptor; - lp->gso_size = tx->prev_gso_size; -} - -static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_tx trans; - int err; - - dwceqos_tx_prepare(skb, lp, &trans); - if (lp->tx_free < trans.nr_descriptors) { - netif_stop_queue(ndev); - return NETDEV_TX_BUSY; - } - - err = dwceqos_tx_linear(skb, lp, &trans); - if (err) - goto tx_error; - - err = dwceqos_tx_frags(skb, lp, &trans); - if (err) - goto tx_error; - - WARN_ON(lp->tx_next != - ((trans.initial_descriptor + trans.nr_descriptors) % - DWCEQOS_TX_DCNT)); - - spin_lock_bh(&lp->tx_lock); - lp->tx_free -= trans.nr_descriptors; - dwceqos_tx_finalize(skb, lp, &trans); - netdev_sent_queue(ndev, skb->len); - spin_unlock_bh(&lp->tx_lock); - - netif_trans_update(ndev); - return 0; - -tx_error: - dwceqos_tx_rollback(lp, &trans); - dev_kfree_skb_any(skb); - return 0; -} - -/* Set MAC address and then update HW accordingly */ -static int dwceqos_set_mac_address(struct net_device *ndev, void *addr) -{ - struct net_local *lp = netdev_priv(ndev); - struct sockaddr *hwaddr = (struct sockaddr *)addr; - - if (netif_running(ndev)) - return -EBUSY; - - if (!is_valid_ether_addr(hwaddr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len); - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - return 0; -} - -static void dwceqos_tx_timeout(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit); -} - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n) -{ - unsigned long data; - - data = (addr[5] << 8) | addr[4]; - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), - data | DWCEQOS_MAC_MAC_ADDR_HI_EN); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data); -} - -static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n) -{ - /* Do not disable MAC address 0 */ - if (reg_n != 0) - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0); -} - -static void dwceqos_set_rx_mode(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval = 0; - u32 mc_filter[2]; - int reg = 1; - struct netdev_hw_addr *ha; - unsigned int max_mac_addr; - - max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); - - if (ndev->flags & IFF_PROMISC) { - regval = DWCEQOS_MAC_PKT_FILT_PR; - } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) || - (ndev->flags & IFF_ALLMULTI))) { - regval = DWCEQOS_MAC_PKT_FILT_PM; - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff); - } else if (!netdev_mc_empty(ndev)) { - regval = DWCEQOS_MAC_PKT_FILT_HMC; - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, ndev) { - /* The upper 6 bits of the calculated CRC are used to - * index the contens of the hash table - */ - int bit_nr = 
bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; - /* The most significant bit determines the register - * to use (H/L) while the other 5 bits determine - * the bit within the register. - */ - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - } - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]); - } - if (netdev_uc_count(ndev) > max_mac_addr) { - regval |= DWCEQOS_MAC_PKT_FILT_PR; - } else { - netdev_for_each_uc_addr(ha, ndev) { - dwceqos_set_umac_addr(lp, ha->addr, reg); - reg++; - } - for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++) - dwceqos_disable_umac_addr(lp, reg); - } - dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void dwceqos_poll_controller(struct net_device *ndev) -{ - disable_irq(ndev->irq); - dwceqos_interrupt(ndev->irq, ndev); - enable_irq(ndev->irq); -} -#endif - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask) -{ - if (tx_mask & BIT(27)) - lp->mmc_counters.txlpitranscntr += - dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR); - if (tx_mask & BIT(26)) - lp->mmc_counters.txpiuscntr += - dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR); - if (tx_mask & BIT(25)) - lp->mmc_counters.txoversize_g += - dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G); - if (tx_mask & BIT(24)) - lp->mmc_counters.txvlanpackets_g += - dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G); - if (tx_mask & BIT(23)) - lp->mmc_counters.txpausepackets += - dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS); - if (tx_mask & BIT(22)) - lp->mmc_counters.txexcessdef += - dwceqos_read(lp, DWC_MMC_TXEXCESSDEF); - if (tx_mask & BIT(21)) - lp->mmc_counters.txpacketcount_g += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G); - if (tx_mask & BIT(20)) - lp->mmc_counters.txoctetcount_g += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G); - if (tx_mask & BIT(19)) - lp->mmc_counters.txcarriererror += - dwceqos_read(lp, DWC_MMC_TXCARRIERERROR); - if (tx_mask & BIT(18)) - lp->mmc_counters.txexcesscol += - dwceqos_read(lp, DWC_MMC_TXEXCESSCOL); - if (tx_mask & BIT(17)) - lp->mmc_counters.txlatecol += - dwceqos_read(lp, DWC_MMC_TXLATECOL); - if (tx_mask & BIT(16)) - lp->mmc_counters.txdeferred += - dwceqos_read(lp, DWC_MMC_TXDEFERRED); - if (tx_mask & BIT(15)) - lp->mmc_counters.txmulticol_g += - dwceqos_read(lp, DWC_MMC_TXMULTICOL_G); - if (tx_mask & BIT(14)) - lp->mmc_counters.txsinglecol_g += - dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G); - if (tx_mask & BIT(13)) - lp->mmc_counters.txunderflowerror += - dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR); - if (tx_mask & BIT(12)) - lp->mmc_counters.txbroadcastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB); - if (tx_mask & BIT(11)) - lp->mmc_counters.txmulticastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB); - if (tx_mask & BIT(10)) - lp->mmc_counters.txunicastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB); - if (tx_mask & BIT(9)) - lp->mmc_counters.tx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB); - if (tx_mask & BIT(8)) - lp->mmc_counters.tx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB); - if (tx_mask & BIT(7)) - lp->mmc_counters.tx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB); - if (tx_mask & BIT(6)) - lp->mmc_counters.tx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB); - if (tx_mask & BIT(5)) - lp->mmc_counters.tx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB); - if (tx_mask & BIT(4)) - lp->mmc_counters.tx64octets_gb 
+= - dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB); - if (tx_mask & BIT(3)) - lp->mmc_counters.txmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G); - if (tx_mask & BIT(2)) - lp->mmc_counters.txbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G); - if (tx_mask & BIT(1)) - lp->mmc_counters.txpacketcount_gb += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB); - if (tx_mask & BIT(0)) - lp->mmc_counters.txoctetcount_gb += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB); - - if (rx_mask & BIT(27)) - lp->mmc_counters.rxlpitranscntr += - dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR); - if (rx_mask & BIT(26)) - lp->mmc_counters.rxlpiuscntr += - dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR); - if (rx_mask & BIT(25)) - lp->mmc_counters.rxctrlpackets_g += - dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G); - if (rx_mask & BIT(24)) - lp->mmc_counters.rxrcverror += - dwceqos_read(lp, DWC_MMC_RXRCVERROR); - if (rx_mask & BIT(23)) - lp->mmc_counters.rxwatchdog += - dwceqos_read(lp, DWC_MMC_RXWATCHDOG); - if (rx_mask & BIT(22)) - lp->mmc_counters.rxvlanpackets_gb += - dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB); - if (rx_mask & BIT(21)) - lp->mmc_counters.rxfifooverflow += - dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW); - if (rx_mask & BIT(20)) - lp->mmc_counters.rxpausepackets += - dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS); - if (rx_mask & BIT(19)) - lp->mmc_counters.rxoutofrangetype += - dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE); - if (rx_mask & BIT(18)) - lp->mmc_counters.rxlengtherror += - dwceqos_read(lp, DWC_MMC_RXLENGTHERROR); - if (rx_mask & BIT(17)) - lp->mmc_counters.rxunicastpackets_g += - dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G); - if (rx_mask & BIT(16)) - lp->mmc_counters.rx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB); - if (rx_mask & BIT(15)) - lp->mmc_counters.rx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB); - if (rx_mask & BIT(14)) - lp->mmc_counters.rx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB); - if (rx_mask & BIT(13)) - lp->mmc_counters.rx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB); - if (rx_mask & BIT(12)) - lp->mmc_counters.rx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB); - if (rx_mask & BIT(11)) - lp->mmc_counters.rx64octets_gb += - dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB); - if (rx_mask & BIT(10)) - lp->mmc_counters.rxoversize_g += - dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G); - if (rx_mask & BIT(9)) - lp->mmc_counters.rxundersize_g += - dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G); - if (rx_mask & BIT(8)) - lp->mmc_counters.rxjabbererror += - dwceqos_read(lp, DWC_MMC_RXJABBERERROR); - if (rx_mask & BIT(7)) - lp->mmc_counters.rxrunterror += - dwceqos_read(lp, DWC_MMC_RXRUNTERROR); - if (rx_mask & BIT(6)) - lp->mmc_counters.rxalignmenterror += - dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR); - if (rx_mask & BIT(5)) - lp->mmc_counters.rxcrcerror += - dwceqos_read(lp, DWC_MMC_RXCRCERROR); - if (rx_mask & BIT(4)) - lp->mmc_counters.rxmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G); - if (rx_mask & BIT(3)) - lp->mmc_counters.rxbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G); - if (rx_mask & BIT(2)) - lp->mmc_counters.rxoctetcount_g += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G); - if (rx_mask & BIT(1)) - lp->mmc_counters.rxoctetcount_gb += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB); - if (rx_mask & BIT(0)) - lp->mmc_counters.rxpacketcount_gb += - dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); -} - -static struct rtnl_link_stats64* 
-dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) -{ - unsigned long flags; - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - s->rx_packets = hwstats->rxpacketcount_gb; - s->rx_bytes = hwstats->rxoctetcount_gb; - s->rx_errors = hwstats->rxpacketcount_gb - - hwstats->rxbroadcastpackets_g - - hwstats->rxmulticastpackets_g - - hwstats->rxunicastpackets_g; - s->multicast = hwstats->rxmulticastpackets_g; - s->rx_length_errors = hwstats->rxlengtherror; - s->rx_crc_errors = hwstats->rxcrcerror; - s->rx_fifo_errors = hwstats->rxfifooverflow; - - s->tx_packets = hwstats->txpacketcount_gb; - s->tx_bytes = hwstats->txoctetcount_gb; - - if (lp->mmc_tx_counters_mask & BIT(21)) - s->tx_errors = hwstats->txpacketcount_gb - - hwstats->txpacketcount_g; - else - s->tx_errors = hwstats->txunderflowerror + - hwstats->txcarriererror; - - return s; -} - -static void -dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) -{ - const struct net_local *lp = netdev_priv(ndev); - - strcpy(ed->driver, lp->pdev->dev.driver->name); - strcpy(ed->version, DRIVER_VERSION); -} - -static void dwceqos_get_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - const struct net_local *lp = netdev_priv(ndev); - - pp->autoneg = lp->flowcontrol.autoneg; - pp->tx_pause = lp->flowcontrol.tx; - pp->rx_pause = lp->flowcontrol.rx; -} - -static int dwceqos_set_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - struct net_local *lp = netdev_priv(ndev); - int ret = 0; - - lp->flowcontrol.autoneg = pp->autoneg; - if (pp->autoneg) { - ndev->phydev->advertising |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Asym_Pause; - } else { - ndev->phydev->advertising &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause; - lp->flowcontrol.rx = pp->rx_pause; - lp->flowcontrol.tx = pp->tx_pause; - } - - if (netif_running(ndev)) - ret = phy_start_aneg(ndev->phydev); - - return ret; -} - -static void dwceqos_get_strings(struct net_device *ndev, u32 stringset, - u8 *data) -{ - size_t i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, dwceqos_ethtool_stats[i].stat_name, - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } -} - -static void dwceqos_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - struct net_local *lp = netdev_priv(ndev); - unsigned long flags; - size_t i; - u8 *mmcstat = (u8 *)&lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, - mmcstat + dwceqos_ethtool_stats[i].offset, - sizeof(u64)); - data++; - } -} - -static int dwceqos_get_sset_count(struct net_device *ndev, int sset) -{ - if (sset == ETH_SS_STATS) - return ARRAY_SIZE(dwceqos_ethtool_stats); - - return -EOPNOTSUPP; -} - -static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *space) -{ - const struct net_local *lp = netdev_priv(dev); - u32 *reg_space = (u32 *)space; - int reg_offset; - int reg_ix = 0; - - /* MAC registers */ - for (reg_offset = 
START_MAC_REG_OFFSET; - reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - /* MTL registers */ - for (reg_offset = START_MTL_REG_OFFSET; - reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - /* DMA registers */ - for (reg_offset = START_DMA_REG_OFFSET; - reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - BUG_ON(4 * reg_ix > REG_SPACE_SIZE); -} - -static int dwceqos_get_regs_len(struct net_device *dev) -{ - return REG_SPACE_SIZE; -} - -static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off"; -} - -static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off"; -} - -static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 lpi_status; - u32 lpi_enabled; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - edata->eee_active = lp->eee_active; - edata->eee_enabled = lp->eee_enabled; - edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER); - lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA); - edata->tx_lpi_enabled = lpi_enabled; - - if (netif_msg_hw(lp)) { - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - - netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n", - dwceqos_get_rx_lpi_state(regval), - dwceqos_get_tx_lpi_state(regval)); - } - - return phy_ethtool_get_eee(ndev->phydev, edata); -} - -static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval; - unsigned long flags; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - if (edata->eee_enabled && !lp->eee_active) - return -EOPNOTSUPP; - - if (edata->tx_lpi_enabled) { - if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN || - edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX) - return -EINVAL; - } - - lp->eee_enabled = edata->eee_enabled; - - if (edata->eee_enabled && edata->tx_lpi_enabled) { - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER, - edata->tx_lpi_timer); - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE; - if (lp->en_tx_lpi_clockgating) - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } - - return phy_ethtool_set_eee(ndev->phydev, edata); -} - -static u32 dwceqos_get_msglevel(struct net_device *ndev) -{ - const struct net_local *lp = netdev_priv(ndev); - - return lp->msg_enable; -} - -static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) -{ - struct net_local *lp = netdev_priv(ndev); - - lp->msg_enable = msglevel; -} - -static const struct ethtool_ops dwceqos_ethtool_ops = { - .get_drvinfo = dwceqos_get_drvinfo, - .get_link = 
ethtool_op_get_link, - .get_pauseparam = dwceqos_get_pauseparam, - .set_pauseparam = dwceqos_set_pauseparam, - .get_strings = dwceqos_get_strings, - .get_ethtool_stats = dwceqos_get_ethtool_stats, - .get_sset_count = dwceqos_get_sset_count, - .get_regs = dwceqos_get_regs, - .get_regs_len = dwceqos_get_regs_len, - .get_eee = dwceqos_get_eee, - .set_eee = dwceqos_set_eee, - .get_msglevel = dwceqos_get_msglevel, - .set_msglevel = dwceqos_set_msglevel, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -static const struct net_device_ops netdev_ops = { - .ndo_open = dwceqos_open, - .ndo_stop = dwceqos_stop, - .ndo_start_xmit = dwceqos_start_xmit, - .ndo_set_rx_mode = dwceqos_set_rx_mode, - .ndo_set_mac_address = dwceqos_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = dwceqos_poll_controller, -#endif - .ndo_do_ioctl = dwceqos_ioctl, - .ndo_tx_timeout = dwceqos_tx_timeout, - .ndo_get_stats64 = dwceqos_get_stats64, -}; - -static const struct of_device_id dwceq_of_match[] = { - { .compatible = "snps,dwc-qos-ethernet-4.10", }, - {} -}; -MODULE_DEVICE_TABLE(of, dwceq_of_match); - -static int dwceqos_probe(struct platform_device *pdev) -{ - struct resource *r_mem = NULL; - struct net_device *ndev; - struct net_local *lp; - int ret = -ENXIO; - - r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r_mem) { - dev_err(&pdev->dev, "no IO resource defined.\n"); - return -ENXIO; - } - - ndev = alloc_etherdev(sizeof(*lp)); - if (!ndev) { - dev_err(&pdev->dev, "etherdev allocation failed.\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - lp = netdev_priv(ndev); - lp->ndev = ndev; - lp->pdev = pdev; - lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT); - - spin_lock_init(&lp->tx_lock); - spin_lock_init(&lp->hw_lock); - spin_lock_init(&lp->stats_lock); - - lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(lp->apb_pclk)) { - dev_err(&pdev->dev, "apb_pclk clock not found.\n"); - ret = PTR_ERR(lp->apb_pclk); - goto err_out_free_netdev; - } - - ret = clk_prepare_enable(lp->apb_pclk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable APER clock.\n"); - goto err_out_free_netdev; - } - - lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem); - if (IS_ERR(lp->baseaddr)) { - dev_err(&pdev->dev, "failed to map baseaddress.\n"); - ret = PTR_ERR(lp->baseaddr); - goto err_out_clk_dis_aper; - } - - ndev->irq = platform_get_irq(pdev, 0); - ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ; - ndev->netdev_ops = &netdev_ops; - ndev->ethtool_ops = &dwceqos_ethtool_ops; - ndev->base_addr = r_mem->start; - - dwceqos_get_hwfeatures(lp); - dwceqos_mdio_set_csr(lp); - - ndev->hw_features = NETIF_F_SG; - - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL) - ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL) - ndev->hw_features |= NETIF_F_RXCSUM; - - ndev->features = ndev->hw_features; - - lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(lp->phy_ref_clk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - ret = PTR_ERR(lp->phy_ref_clk); - goto err_out_clk_dis_aper; - } - - ret = clk_prepare_enable(lp->phy_ref_clk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable device clock.\n"); - goto err_out_clk_dis_aper; - } - - lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, - 
"phy-handle", 0); - if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) { - ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&pdev->dev, "invalid fixed-link"); - goto err_out_clk_dis_phy; - } - - lp->phy_node = of_node_get(lp->pdev->dev.of_node); - } - - ret = of_get_phy_mode(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); - goto err_out_deregister_fixed_link; - } - - lp->phy_interface = ret; - - ret = dwceqos_mii_init(lp); - if (ret) { - dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); - goto err_out_deregister_fixed_link; - } - - ret = dwceqos_mii_probe(ndev); - if (ret != 0) { - netdev_err(ndev, "mii_probe fail.\n"); - ret = -ENXIO; - goto err_out_deregister_fixed_link; - } - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim, - (unsigned long)ndev); - tasklet_disable(&lp->tx_bdreclaim_tasklet); - - lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME, - WQ_MEM_RECLAIM, 0); - INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); - - platform_set_drvdata(pdev, ndev); - ret = dwceqos_probe_config_dt(pdev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", - ret); - goto err_out_deregister_fixed_link; - } - dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", - pdev->id, ndev->base_addr, ndev->irq); - - ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0, - ndev->name, ndev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", - ndev->irq, ret); - goto err_out_deregister_fixed_link; - } - - if (netif_msg_probe(lp)) - netdev_dbg(ndev, "net_local@%p\n", lp); - - netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); - - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_deregister_fixed_link; - } - - return 0; - -err_out_deregister_fixed_link: - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); -err_out_clk_dis_phy: - clk_disable_unprepare(lp->phy_ref_clk); -err_out_clk_dis_aper: - clk_disable_unprepare(lp->apb_pclk); -err_out_free_netdev: - of_node_put(lp->phy_node); - free_netdev(ndev); - platform_set_drvdata(pdev, NULL); - return ret; -} - -static int dwceqos_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct net_local *lp; - - if (ndev) { - lp = netdev_priv(ndev); - - if (ndev->phydev) { - phy_disconnect(ndev->phydev); - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); - } - mdiobus_unregister(lp->mii_bus); - mdiobus_free(lp->mii_bus); - - unregister_netdev(ndev); - - clk_disable_unprepare(lp->phy_ref_clk); - clk_disable_unprepare(lp->apb_pclk); - - free_netdev(ndev); - } - - return 0; -} - -static struct platform_driver dwceqos_driver = { - .probe = dwceqos_probe, - .remove = dwceqos_remove, - .driver = { - .name = DRIVER_NAME, - .of_match_table = dwceq_of_match, - }, -}; - -module_platform_driver(dwceqos_driver); - -MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>"); -MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>"); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index baa3e4a5731c..f864fd0663db 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ 
b/drivers/net/ethernet/tehuti/tehuti.c @@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct *napi, int budget) * device lock and allow waiting tasks (eg rmmod) to advance) */ priv->napi_stop = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); bdx_enable_interrupts(priv); } return work_done; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 65088224c207..9f3d9c67e3fe 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -145,6 +145,7 @@ do { \ cpsw->data.active_slave) #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 +#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 static int debug_level; module_param(debug_level, int, 0); @@ -158,6 +159,10 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE; module_param(rx_packet_max, int, 0); MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)"); +static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; +module_param(descs_pool_size, int, 0444); +MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); + struct cpsw_wr_regs { u32 id_ver; u32 soft_reset; @@ -352,7 +357,6 @@ struct cpsw_slave { struct phy_device *phy; struct net_device *ndev; u32 port_vlan; - u32 open_stat; }; static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) @@ -395,6 +399,7 @@ struct cpsw_common { struct cpts *cpts; int rx_ch_num, tx_ch_num; int speed; + int usage_count; }; struct cpsw_priv { @@ -699,18 +704,9 @@ static void cpsw_rx_handler(void *token, int len, int status) cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { - bool ndev_status = false; - struct cpsw_slave *slave = cpsw->slaves; - int n; - - if (cpsw->data.dual_emac) { - /* In dual emac mode check for all interfaces */ - for (n = cpsw->data.slaves; n; n--, slave++) - if (netif_running(slave->ndev)) - ndev_status = true; - } - - if (ndev_status && (status >= 0)) { + /* In dual emac mode check for all interfaces */ + if (cpsw->data.dual_emac && cpsw->usage_count && + (status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up * and running, instead of freeing which results @@ -934,7 +930,7 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) } if (num_rx < budget) { - napi_complete(napi_rx); + napi_complete_done(napi_rx, num_rx); writel(0xff, &cpsw->wr_regs->rx_en); if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { cpsw->rx_irq_disabled = false; @@ -1230,21 +1226,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev, } } -static int cpsw_common_res_usage_state(struct cpsw_common *cpsw) -{ - u32 i; - u32 usage_count = 0; - - if (!cpsw->data.dual_emac) - return 0; - - for (i = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].open_stat) - usage_count++; - - return usage_count; -} - static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv, struct sk_buff *skb, struct cpdma_chan *txch) @@ -1478,8 +1459,6 @@ static int cpsw_ndo_open(struct net_device *ndev) return ret; } - if (!cpsw_common_res_usage_state(cpsw)) - cpsw_intr_disable(cpsw); netif_carrier_off(ndev); /* Notify the stack of the actual queue counts. 
*/ @@ -1501,8 +1480,8 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg)); - /* initialize host and slave ports */ - if (!cpsw_common_res_usage_state(cpsw)) + /* Initialize host and slave ports */ + if (!cpsw->usage_count) cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); @@ -1513,7 +1492,8 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); - if (!cpsw_common_res_usage_state(cpsw)) { + /* initialize shared resources for every ndev */ + if (!cpsw->usage_count) { /* disable priority elevation */ __raw_writel(0, &cpsw->regs->ptype); @@ -1555,9 +1535,7 @@ static int cpsw_ndo_open(struct net_device *ndev) cpdma_ctlr_start(cpsw->dma); cpsw_intr_enable(cpsw); - - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = true; + cpsw->usage_count++; return 0; @@ -1578,7 +1556,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); - if (cpsw_common_res_usage_state(cpsw) <= 1) { + if (cpsw->usage_count <= 1) { napi_disable(&cpsw->napi_rx); napi_disable(&cpsw->napi_tx); cpts_unregister(cpsw->cpts); @@ -1591,9 +1569,8 @@ static int cpsw_ndo_stop(struct net_device *ndev) if (cpsw_need_resplit(cpsw)) cpsw_split_res(ndev); + cpsw->usage_count--; pm_runtime_put_sync(cpsw->dev); - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = false; return 0; } @@ -1606,12 +1583,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct cpdma_chan *txch; int ret, q_idx; - netif_trans_update(ndev); - if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); ndev->stats.tx_dropped++; - return NETDEV_TX_OK; + return NET_XMIT_DROP; } if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && @@ -2363,17 +2338,11 @@ static int cpsw_update_channels(struct cpsw_priv *priv, return 0; } -static int cpsw_set_channels(struct net_device *ndev, - struct ethtool_channels *chs) +static void cpsw_suspend_data_pass(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); struct cpsw_slave *slave; - int i, ret; - - ret = cpsw_check_ch_settings(cpsw, chs); - if (ret < 0) - return ret; + int i; /* Disable NAPI scheduling */ cpsw_intr_disable(cpsw); @@ -2391,6 +2360,51 @@ static int cpsw_set_channels(struct net_device *ndev, /* Handle rest of tx packets and stop cpdma channels */ cpdma_ctlr_stop(cpsw->dma); +} + +static int cpsw_resume_data_pass(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + /* Allow rx packets handling */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_dormant_off(slave->ndev); + + /* After this receive is started */ + if (cpsw->usage_count) { + ret = cpsw_fill_rx_channels(priv); + if (ret) + return ret; + + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_tx_start_all_queues(slave->ndev); + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + struct cpsw_priv 
*priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + ret = cpsw_check_ch_settings(cpsw, chs); + if (ret < 0) + return ret; + + cpsw_suspend_data_pass(ndev); ret = cpsw_update_channels(priv, chs); if (ret) goto err; @@ -2413,30 +2427,14 @@ static int cpsw_set_channels(struct net_device *ndev, dev_err(priv->dev, "cannot set real number of rx queues\n"); goto err; } - - /* Enable rx packets handling */ - netif_dormant_off(slave->ndev); } - if (cpsw_common_res_usage_state(cpsw)) { - ret = cpsw_fill_rx_channels(priv); - if (ret) - goto err; - + if (cpsw->usage_count) cpsw_split_res(ndev); - /* After this receive is started */ - cpdma_ctlr_start(cpsw->dma); - cpsw_intr_enable(cpsw); - } - - /* Resume transmit for every affected interface */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - netif_tx_start_all_queues(slave->ndev); - } - return 0; + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; err: dev_err(priv->dev, "cannot update channels number, closing device\n"); dev_close(ndev); @@ -2479,6 +2477,52 @@ static int cpsw_nway_reset(struct net_device *ndev) return -EOPNOTSUPP; } +static void cpsw_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + /* not supported */ + ering->tx_max_pending = 0; + ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); + ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; + ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); +} + +static int cpsw_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + + /* ignore ering->tx_pending - only rx_pending adjustment is supported */ + + if (ering->rx_mini_pending || ering->rx_jumbo_pending || + ering->rx_pending < CPSW_MAX_QUEUES || + ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES)) + return -EINVAL; + + if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma)) + return 0; + + cpsw_suspend_data_pass(ndev); + + cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); + + if (cpsw->usage_count) + cpdma_chan_split_pool(cpsw->dma); + + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; + + dev_err(&ndev->dev, "cannot set ring params, closing device\n"); + dev_close(ndev); + return ret; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -2505,6 +2549,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_eee = cpsw_get_eee, .set_eee = cpsw_set_eee, .nway_reset = cpsw_nway_reset, + .get_ringparam = cpsw_get_ringparam, + .set_ringparam = cpsw_set_ringparam, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw, @@ -2969,6 +3015,7 @@ static int cpsw_probe(struct platform_device *pdev) dma_params.has_ext_regs = true; dma_params.desc_hw_addr = dma_params.desc_mem_phys; dma_params.bus_freq_mhz = cpsw->bus_freq_mhz; + dma_params.descs_pool_size = descs_pool_size; cpsw->dma = cpdma_ctlr_create(&dma_params); if (!cpsw->dma) { @@ -2985,7 +3032,7 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_dma_ret; } - ale_params.dev = &ndev->dev; + ale_params.dev = &pdev->dev; ale_params.ale_ageout = ale_ageout; ale_params.ale_entries = data->ale_entries; ale_params.ale_ports = data->slaves; @@ -3072,9 
+3119,9 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", - &ss_res->start, ndev->irq); - + cpsw_notice(priv, probe, + "initialized device (regs %pa, irq %d, pool size %d)\n", + &ss_res->start, ndev->irq, dma_params.descs_pool_size); if (cpsw->data.dual_emac) { ret = cpsw_probe_dual_emac(priv); if (ret) { diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 43b061bd8e07..ddd43e09111e 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -1,5 +1,5 @@ /* - * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine + * Texas Instruments N-Port Ethernet Switch Address Lookup Engine * * Copyright (C) 2012 Texas Instruments * @@ -27,11 +27,14 @@ #define BITMASK(bits) (BIT(bits) - 1) -#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff) +#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask)) #define ALE_VERSION_MINOR(rev) (rev & 0xff) +#define ALE_VERSION_1R3 0x0103 +#define ALE_VERSION_1R4 0x0104 /* ALE Registers */ #define ALE_IDVER 0x00 +#define ALE_STATUS 0x04 #define ALE_CONTROL 0x08 #define ALE_PRESCALE 0x10 #define ALE_UNKNOWNVLAN 0x18 @@ -39,6 +42,13 @@ #define ALE_TABLE 0x34 #define ALE_PORTCTL 0x40 +/* ALE NetCP NU switch specific Registers */ +#define ALE_UNKNOWNVLAN_MEMBER 0x90 +#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD 0x94 +#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD 0x98 +#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C +#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg))) + #define ALE_TABLE_WRITE BIT(31) #define ALE_TYPE_FREE 0 @@ -51,6 +61,10 @@ #define ALE_UCAST_OUI 2 #define ALE_UCAST_TOUCHED 3 +#define ALE_TABLE_SIZE_MULTIPLIER 1024 +#define ALE_STATUS_SIZE_MASK 0x1f +#define ALE_TABLE_SIZE_DEFAULT 64 + static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { int idx; @@ -84,20 +98,34 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \ cpsw_ale_set_field(ale_entry, start, bits, value); \ } +#define DEFINE_ALE_FIELD1(name, start) \ +static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \ +{ \ + return cpsw_ale_get_field(ale_entry, start, bits); \ +} \ +static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \ + u32 bits) \ +{ \ + cpsw_ale_set_field(ale_entry, start, bits, value); \ +} + DEFINE_ALE_FIELD(entry_type, 60, 2) DEFINE_ALE_FIELD(vlan_id, 48, 12) DEFINE_ALE_FIELD(mcast_state, 62, 2) -DEFINE_ALE_FIELD(port_mask, 66, 3) +DEFINE_ALE_FIELD1(port_mask, 66) DEFINE_ALE_FIELD(super, 65, 1) DEFINE_ALE_FIELD(ucast_type, 62, 2) -DEFINE_ALE_FIELD(port_num, 66, 2) +DEFINE_ALE_FIELD1(port_num, 66) DEFINE_ALE_FIELD(blocked, 65, 1) DEFINE_ALE_FIELD(secure, 64, 1) -DEFINE_ALE_FIELD(vlan_untag_force, 24, 3) -DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3) -DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3) -DEFINE_ALE_FIELD(vlan_member_list, 0, 3) +DEFINE_ALE_FIELD1(vlan_untag_force, 24) +DEFINE_ALE_FIELD1(vlan_reg_mcast, 16) +DEFINE_ALE_FIELD1(vlan_unreg_mcast, 8) +DEFINE_ALE_FIELD1(vlan_member_list, 0) DEFINE_ALE_FIELD(mcast, 40, 1) +/* ALE NetCP nu switch specific */ +DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3) +DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3) /* The MAC address field in the ALE entry cannot be macroized as above */ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr) @@ -223,14 +251,16 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, { int mask; - mask = cpsw_ale_get_port_mask(ale_entry); + mask = 
cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); if ((mask & port_mask) == 0) return; /* ports dont intersect, not interested */ mask &= ~port_mask; /* free if only remaining port is host port */ if (mask) - cpsw_ale_set_port_mask(ale_entry, mask); + cpsw_ale_set_port_mask(ale_entry, mask, + ale->port_mask_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } @@ -291,7 +321,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); - cpsw_ale_set_port_num(ale_entry, port); + cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits); idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) @@ -338,9 +368,11 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); - mask = cpsw_ale_get_port_mask(ale_entry); + mask = cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); port_mask |= mask; - cpsw_ale_set_port_mask(ale_entry, port_mask); + cpsw_ale_set_port_mask(ale_entry, port_mask, + ale->port_mask_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -367,7 +399,8 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, cpsw_ale_read(ale, idx, ale_entry); if (port_mask) - cpsw_ale_set_port_mask(ale_entry, port_mask); + cpsw_ale_set_port_mask(ale_entry, port_mask, + ale->port_mask_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); @@ -376,6 +409,21 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, } EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast); +/* ALE NetCP NU switch specific vlan functions */ +static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry, + int reg_mcast, int unreg_mcast) +{ + int idx; + + /* Set VLAN registered multicast flood mask */ + idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry); + writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); + + /* Set VLAN unregistered multicast flood mask */ + idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry); + writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); +} + int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast) { @@ -389,10 +437,16 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); cpsw_ale_set_vlan_id(ale_entry, vid); - cpsw_ale_set_vlan_untag_force(ale_entry, untag); - cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); - cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); - cpsw_ale_set_vlan_member_list(ale_entry, port); + cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits); + if (!ale->params.nu_switch_ale) { + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast, + ale->vlan_field_bits); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); + } else { + cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast); + } + cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -418,7 +472,8 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) cpsw_ale_read(ale, idx, ale_entry); if (port_mask) - cpsw_ale_set_vlan_member_list(ale_entry, port_mask); + cpsw_ale_set_vlan_member_list(ale_entry, port_mask, + ale->vlan_field_bits); 
else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); @@ -446,12 +501,15 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) if (type != ALE_TYPE_VLAN) continue; - unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); + unreg_mcast = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); if (allmulti) unreg_mcast |= 1; else unreg_mcast &= ~1; - cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); cpsw_ale_write(ale, idx, ale_entry); } } @@ -464,7 +522,7 @@ struct ale_control_info { int bits; }; -static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { +static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { [ALE_ENABLE] = { .name = "enable", .offset = ALE_CONTROL, @@ -721,11 +779,83 @@ static void cpsw_ale_timer(unsigned long arg) void cpsw_ale_start(struct cpsw_ale *ale) { - u32 rev; + u32 rev, ale_entries; rev = __raw_readl(ale->params.ale_regs + ALE_IDVER); - dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n", - ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev)); + if (!ale->params.major_ver_mask) + ale->params.major_ver_mask = 0xff; + ale->version = + (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) | + ALE_VERSION_MINOR(rev); + dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n", + ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask), + ALE_VERSION_MINOR(rev)); + + if (!ale->params.ale_entries) { + ale_entries = + __raw_readl(ale->params.ale_regs + ALE_STATUS) & + ALE_STATUS_SIZE_MASK; + /* ALE available on newer NetCP switches has introduced + * a register, ALE_STATUS, to indicate the size of ALE + * table which shows the size as a multiple of 1024 entries. + * For these, params.ale_entries will be set to zero. So + * read the register and update the value of ale_entries. + * ALE table on NetCP lite, is much smaller and is indicated + * by a value of zero in ALE_STATUS. So use a default value + * of ALE_TABLE_SIZE_DEFAULT for this. Caller is expected + * to set the value of ale_entries for all other versions + * of ALE. + */ + if (!ale_entries) + ale_entries = ALE_TABLE_SIZE_DEFAULT; + else + ale_entries *= ALE_TABLE_SIZE_MULTIPLIER; + ale->params.ale_entries = ale_entries; + } + dev_info(ale->params.dev, + "ALE Table size %ld\n", ale->params.ale_entries); + + /* set default bits for existing h/w */ + ale->port_mask_bits = 3; + ale->port_num_bits = 2; + ale->vlan_field_bits = 3; + + /* Set defaults override for ALE on NetCP NU switch and for version + * 1R3 + */ + if (ale->params.nu_switch_ale) { + /* Separate registers for unknown vlan configuration. 
+ * Also there are N bits, where N is number of ale + * ports and shift value should be 0 + */ + ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset = + ALE_UNKNOWNVLAN_MEMBER; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset = + ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset = + ALE_UNKNOWNVLAN_REG_MCAST_FLOOD; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset = + ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; + ale->port_mask_bits = ale->params.ale_ports; + ale->port_num_bits = ale->params.ale_ports - 1; + ale->vlan_field_bits = ale->params.ale_ports; + } else if (ale->version == ALE_VERSION_1R3) { + ale->port_mask_bits = ale->params.ale_ports; + ale->port_num_bits = 3; + ale->vlan_field_bits = ale->params.ale_ports; + } + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index a7001894f3da..25d24e8d0904 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -1,5 +1,5 @@ /* - * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs + * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs * * Copyright (C) 2012 Texas Instruments * @@ -21,6 +21,16 @@ struct cpsw_ale_params { unsigned long ale_ageout; /* in secs */ unsigned long ale_entries; unsigned long ale_ports; + /* NU Switch has specific handling as number of bits in ALE entries + * are different than other versions of ALE. Also there are specific + * registers for unknown vlan specific fields. So use nu_switch_ale + * to identify this hardware. + */ + bool nu_switch_ale; + /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So + * pass it from caller. 
+ */ + u32 major_ver_mask; }; struct cpsw_ale { @@ -28,6 +38,11 @@ struct cpsw_ale { struct timer_list timer; unsigned long ageout; int allmulti; + u32 version; + /* These bits are different on NetCP NU Switch ALE */ + u32 port_mask_bits; + u32 port_num_bits; + u32 vlan_field_bits; }; enum cpsw_ale_control { diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 36518fc5c7cc..7ecc6b70e7e8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -108,6 +108,8 @@ struct cpdma_ctlr { spinlock_t lock; struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; int chan_num; + int num_rx_desc; /* RX descriptors number */ + int num_tx_desc; /* TX descriptors number */ }; struct cpdma_chan { @@ -166,12 +168,12 @@ static struct cpdma_control_info controls[] = { #define num_chan params.num_chan /* various accessors */ -#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) -#define chan_read(chan, fld) __raw_readl((chan)->fld) -#define desc_read(desc, fld) __raw_readl(&(desc)->fld) -#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) -#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) -#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) +#define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs)) +#define chan_read(chan, fld) readl((chan)->fld) +#define desc_read(desc, fld) readl(&(desc)->fld) +#define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs)) +#define chan_write(chan, fld, v) writel(v, (chan)->fld) +#define desc_write(desc, fld, v) writel((u32)(v), &(desc)->fld) #define cpdma_desc_to_port(chan, mode, directed) \ do { \ @@ -181,8 +183,10 @@ static struct cpdma_control_info controls[] = { (directed << CPDMA_TO_PORT_SHIFT)); \ } while (0) -static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) +static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) { + struct cpdma_desc_pool *pool = ctlr->pool; + if (!pool) return; @@ -191,10 +195,8 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) gen_pool_size(pool->gen_pool), gen_pool_avail(pool->gen_pool)); if (pool->cpumap) - dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, + dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap, pool->phys); - else - iounmap(pool->iomap); } /* @@ -203,37 +205,50 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) * devices (e.g. cpsw switches) use plain old memory. 
Descriptor pools * abstract out these details */ -static struct cpdma_desc_pool * -cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, - int size, int align) +int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) { + struct cpdma_params *cpdma_params = &ctlr->params; struct cpdma_desc_pool *pool; - int ret; + int ret = -ENOMEM; - pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); + pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL); if (!pool) goto gen_pool_create_fail; + ctlr->pool = pool; + + pool->mem_size = cpdma_params->desc_mem_size; + pool->desc_size = ALIGN(sizeof(struct cpdma_desc), + cpdma_params->desc_align); + pool->num_desc = pool->mem_size / pool->desc_size; + + if (cpdma_params->descs_pool_size) { + /* recalculate memory size required cpdma descriptor pool + * basing on number of descriptors specified by user and + * if memory size > CPPI internal RAM size (desc_mem_size) + * then switch to use DDR + */ + pool->num_desc = cpdma_params->descs_pool_size; + pool->mem_size = pool->desc_size * pool->num_desc; + if (pool->mem_size > cpdma_params->desc_mem_size) + cpdma_params->desc_mem_phys = 0; + } - pool->dev = dev; - pool->mem_size = size; - pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); - pool->num_desc = size / pool->desc_size; - - pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1, - "cpdma"); + pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size), + -1, "cpdma"); if (IS_ERR(pool->gen_pool)) { - dev_err(dev, "pool create failed %ld\n", - PTR_ERR(pool->gen_pool)); + ret = PTR_ERR(pool->gen_pool); + dev_err(ctlr->dev, "pool create failed %d\n", ret); goto gen_pool_create_fail; } - if (phys) { - pool->phys = phys; - pool->iomap = ioremap(phys, size); /* should be memremap? 
*/ - pool->hw_addr = hw_addr; + if (cpdma_params->desc_mem_phys) { + pool->phys = cpdma_params->desc_mem_phys; + pool->iomap = devm_ioremap(ctlr->dev, pool->phys, + pool->mem_size); + pool->hw_addr = cpdma_params->desc_hw_addr; } else { - pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr, - GFP_KERNEL); + pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size, + &pool->hw_addr, GFP_KERNEL); pool->iomap = (void __iomem __force *)pool->cpumap; pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } @@ -244,16 +259,17 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap, pool->phys, pool->mem_size, -1); if (ret < 0) { - dev_err(dev, "pool add failed %d\n", ret); + dev_err(ctlr->dev, "pool add failed %d\n", ret); goto gen_pool_add_virt_fail; } - return pool; + return 0; gen_pool_add_virt_fail: - cpdma_desc_pool_destroy(pool); + cpdma_desc_pool_destroy(ctlr); gen_pool_create_fail: - return NULL; + ctlr->pool = NULL; + return ret; } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, @@ -502,13 +518,11 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->chan_num = 0; spin_lock_init(&ctlr->lock); - ctlr->pool = cpdma_desc_pool_create(ctlr->dev, - ctlr->params.desc_mem_phys, - ctlr->params.desc_hw_addr, - ctlr->params.desc_mem_size, - ctlr->params.desc_align); - if (!ctlr->pool) + if (cpdma_desc_pool_create(ctlr)) return NULL; + /* split pool equally between RX/TX by default */ + ctlr->num_tx_desc = ctlr->pool->num_desc / 2; + ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc; if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) ctlr->num_chan = CPDMA_MAX_CHANNELS; @@ -542,10 +556,10 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) } for (i = 0; i < ctlr->num_chan; i++) { - __raw_writel(0, ctlr->params.txhdp + 4 * i); - __raw_writel(0, ctlr->params.rxhdp + 4 * i); - __raw_writel(0, ctlr->params.txcp + 4 * i); - __raw_writel(0, ctlr->params.rxcp + 4 * i); + writel(0, ctlr->params.txhdp + 4 * i); + writel(0, ctlr->params.rxhdp + 4 * i); + writel(0, ctlr->params.txcp + 4 * i); + writel(0, ctlr->params.rxcp + 4 * i); } dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); @@ -623,7 +637,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) cpdma_chan_destroy(ctlr->channels[i]); - cpdma_desc_pool_destroy(ctlr->pool); + cpdma_desc_pool_destroy(ctlr); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); @@ -708,22 +722,22 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr, } } /* use remains */ - most_chan->desc_num += desc_cnt; + if (most_chan) + most_chan->desc_num += desc_cnt; } /** * cpdma_chan_split_pool - Splits ctrl pool between all channels. 
* Has to be called under ctlr lock */ -static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) +int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) { int tx_per_ch_desc = 0, rx_per_ch_desc = 0; - struct cpdma_desc_pool *pool = ctlr->pool; int free_rx_num = 0, free_tx_num = 0; int rx_weight = 0, tx_weight = 0; int tx_desc_num, rx_desc_num; struct cpdma_chan *chan; - int i, tx_num = 0; + int i; if (!ctlr->chan_num) return 0; @@ -741,15 +755,14 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) if (!chan->weight) free_tx_num++; tx_weight += chan->weight; - tx_num++; } } if (rx_weight > 100 || tx_weight > 100) return -EINVAL; - tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num; - rx_desc_num = pool->num_desc - tx_desc_num; + tx_desc_num = ctlr->num_tx_desc; + rx_desc_num = ctlr->num_rx_desc; if (free_tx_num) { tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100; @@ -765,6 +778,8 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_split_pool); + /* cpdma_chan_set_weight - set weight of a channel in percentage. * Tx and Rx channels have separate weights. That is 100% for RX @@ -820,8 +835,8 @@ EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate); */ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) { - struct cpdma_ctlr *ctlr = ch->ctlr; unsigned long flags, ch_flags; + struct cpdma_ctlr *ctlr; int ret, prio_mode; u32 rmask; @@ -831,6 +846,7 @@ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) if (ch->rate == rate) return rate; + ctlr = ch->ctlr; spin_lock_irqsave(&ctlr->lock, flags); spin_lock_irqsave(&ch->lock, ch_flags); @@ -898,7 +914,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, chan->chan_num = chan_num; chan->handler = handler; chan->rate = 0; - chan->desc_num = ctlr->pool->num_desc / 2; chan->weight = 0; if (is_rx_chan(chan)) { @@ -1061,13 +1076,17 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; cpdma_desc_to_port(chan, mode, directed); - desc_write(desc, hw_next, 0); - desc_write(desc, hw_buffer, buffer); - desc_write(desc, hw_len, len); - desc_write(desc, hw_mode, mode | len); - desc_write(desc, sw_token, token); - desc_write(desc, sw_buffer, buffer); - desc_write(desc, sw_len, len); + /* Relaxed IO accessors can be used here as there is read barrier + * at the end of write sequence. 
+ */ + writel_relaxed(0, &desc->hw_next); + writel_relaxed(buffer, &desc->hw_buffer); + writel_relaxed(len, &desc->hw_len); + writel_relaxed(mode | len, &desc->hw_mode); + writel_relaxed(token, &desc->sw_token); + writel_relaxed(buffer, &desc->sw_buffer); + writel_relaxed(len, &desc->sw_len); + desc_read(desc, sw_len); __cpdma_chan_submit(chan, desc); @@ -1136,7 +1155,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) } desc_dma = desc_phys(pool, desc); - status = __raw_readl(&desc->hw_mode); + status = desc_read(desc, hw_mode); outlen = status & 0x7ff; if (status & CPDMA_DESC_OWNER) { chan->stats.busy_dequeue++; @@ -1155,7 +1174,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) chan->count--; chan->stats.good_dequeue++; - if (status & CPDMA_DESC_EOQ) { + if ((status & CPDMA_DESC_EOQ) && chan->head) { chan->stats.requeue++; chan_write(chan, hdp, desc_phys(pool, chan->head)); } @@ -1316,4 +1335,23 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) } EXPORT_SYMBOL_GPL(cpdma_control_set); +int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr) +{ + return ctlr->num_rx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs); + +int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr) +{ + return ctlr->num_tx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs); + +void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc) +{ + ctlr->num_rx_desc = num_rx_desc; + ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs); + MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 4a167db2abab..fd65ce2b83de 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -37,6 +37,7 @@ struct cpdma_params { int desc_mem_size; int desc_align; u32 bus_freq_mhz; + u32 descs_pool_size; /* * Some instances of embedded cpdma controllers have extra control and @@ -113,5 +114,9 @@ enum cpdma_control { int cpdma_control_get(struct cpdma_ctlr *ctlr, int control); int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value); +int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr); +void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc); +int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr); +int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr); #endif diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 481c7bf0395b..64d5527feb2a 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct *napi, int budget) &emac_rxhost_errcodes[cause][0], ch); } } else if (num_rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, num_rx_pkts); emac_int_enable(priv); } diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 0f58c584ae09..8900a6fad318 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -23,6 +23,7 @@ #include <linux/netdevice.h> #include <linux/soc/ti/knav_dma.h> +#include <linux/u64_stats_sync.h> /* Maximum Ethernet frame size supported by Keystone switch */ #define NETCP_MAX_FRAME_SIZE 9504 @@ -68,6 +69,20 @@ struct netcp_addr { struct list_head node; }; +struct netcp_stats { + struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp; + u64 rx_packets; + u64 rx_bytes; + u32 rx_errors; + u32 rx_dropped; + + struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp; + u64 
tx_packets; + u64 tx_bytes; + u32 tx_errors; + u32 tx_dropped; +}; + struct netcp_intf { struct device *dev; struct device *ndev_dev; @@ -87,6 +102,11 @@ struct netcp_intf { void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; struct napi_struct rx_napi; struct napi_struct tx_napi; +#define ETH_SW_CAN_REMOVE_ETH_FCS BIT(0) + u32 hw_cap; + + /* 64-bit netcp stats */ + struct netcp_stats stats; void *rx_channel; const char *dma_chan_name; @@ -115,6 +135,7 @@ struct netcp_packet { struct sk_buff *skb; __le32 *epib; u32 *psdata; + u32 eflags; unsigned int psdata_len; struct netcp_intf *netcp; struct netcp_tx_pipe *tx_pipe; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index c243335ed649..7c7ae0890e90 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -122,6 +122,13 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc, *ndesc = le32_to_cpu(desc->next_desc); } +static void get_desc_info(u32 *desc_info, u32 *pkt_info, + struct knav_dma_desc *desc) +{ + *desc_info = le32_to_cpu(desc->desc_info); + *pkt_info = le32_to_cpu(desc->packet_info); +} + static u32 get_sw_data(int index, struct knav_dma_desc *desc) { /* No Endian conversion needed as this data is untouched by hw */ @@ -622,6 +629,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, static void netcp_empty_rx_queue(struct netcp_intf *netcp) { + struct netcp_stats *rx_stats = &netcp->stats; struct knav_dma_desc *desc; unsigned int dma_sz; dma_addr_t dma; @@ -635,16 +643,17 @@ static void netcp_empty_rx_queue(struct netcp_intf *netcp) if (unlikely(!desc)) { dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n", __func__); - netcp->ndev->stats.rx_errors++; + rx_stats->rx_errors++; continue; } netcp_free_rx_desc_chain(netcp, desc); - netcp->ndev->stats.rx_dropped++; + rx_stats->rx_dropped++; } } static int netcp_process_one_rx_packet(struct netcp_intf *netcp) { + struct netcp_stats *rx_stats = &netcp->stats; unsigned int dma_sz, buf_len, org_buf_len; struct knav_dma_desc *desc, *ndesc; unsigned int pkt_sz = 0, accum_sz; @@ -653,6 +662,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) struct netcp_packet p_info; struct sk_buff *skb; void *org_buf_ptr; + u32 tmp; dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); if (!dma_desc) @@ -724,21 +734,27 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) knav_pool_desc_put(netcp->rx_pool, ndesc); } - /* Free the primary descriptor */ - knav_pool_desc_put(netcp->rx_pool, desc); - /* check for packet len and warn */ if (unlikely(pkt_sz != accum_sz)) dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n", pkt_sz, accum_sz); - /* Remove ethernet FCS from the packet */ - __pskb_trim(skb, skb->len - ETH_FCS_LEN); + /* Newer version of the Ethernet switch can trim the Ethernet FCS + * from the packet and is indicated in hw_cap. 
So trim it only for + * older h/w + */ + if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS)) + __pskb_trim(skb, skb->len - ETH_FCS_LEN); /* Call each of the RX hooks */ p_info.skb = skb; skb->dev = netcp->ndev; p_info.rxtstamp_complete = false; + get_desc_info(&tmp, &p_info.eflags, desc); + p_info.epib = desc->epib; + p_info.psdata = (u32 __force *)desc->psdata; + p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) & + KNAV_DMA_DESC_EFLAGS_MASK); list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) { int ret; @@ -747,14 +763,20 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) if (unlikely(ret)) { dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n", rx_hook->order, ret); - netcp->ndev->stats.rx_errors++; + /* Free the primary descriptor */ + rx_stats->rx_dropped++; + knav_pool_desc_put(netcp->rx_pool, desc); dev_kfree_skb(skb); return 0; } } + /* Free the primary descriptor */ + knav_pool_desc_put(netcp->rx_pool, desc); - netcp->ndev->stats.rx_packets++; - netcp->ndev->stats.rx_bytes += skb->len; + u64_stats_update_begin(&rx_stats->syncp_rx); + rx_stats->rx_packets++; + rx_stats->rx_bytes += skb->len; + u64_stats_update_end(&rx_stats->syncp_rx); /* push skb up the stack */ skb->protocol = eth_type_trans(skb, netcp->ndev); @@ -763,7 +785,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) free_desc: netcp_free_rx_desc_chain(netcp, desc); - netcp->ndev->stats.rx_errors++; + rx_stats->rx_errors++; return 0; } @@ -947,7 +969,7 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) netcp_rxpool_refill(netcp); if (packets < budget) { - napi_complete(&netcp->rx_napi); + napi_complete_done(&netcp->rx_napi, packets); knav_queue_enable_notify(netcp->rx_queue); } @@ -994,6 +1016,7 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp, static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, unsigned int budget) { + struct netcp_stats *tx_stats = &netcp->stats; struct knav_dma_desc *desc; struct netcp_tx_cb *tx_cb; struct sk_buff *skb; @@ -1008,7 +1031,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz); if (unlikely(!desc)) { dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); - netcp->ndev->stats.tx_errors++; + tx_stats->tx_errors++; continue; } @@ -1019,7 +1042,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, netcp_free_tx_desc_chain(netcp, desc, dma_sz); if (!skb) { dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); - netcp->ndev->stats.tx_errors++; + tx_stats->tx_errors++; continue; } @@ -1036,8 +1059,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, netif_wake_subqueue(netcp->ndev, subqueue); } - netcp->ndev->stats.tx_packets++; - netcp->ndev->stats.tx_bytes += skb->len; + u64_stats_update_begin(&tx_stats->syncp_tx); + tx_stats->tx_packets++; + tx_stats->tx_bytes += skb->len; + u64_stats_update_end(&tx_stats->syncp_tx); dev_kfree_skb(skb); pkts++; } @@ -1212,9 +1237,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, /* psdata points to both native-endian and device-endian data */ __le32 *psdata = (void __force *)p_info.psdata; - memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, - p_info.psdata_len); - set_words(p_info.psdata, p_info.psdata_len, psdata); + set_words((u32 *)psdata + + (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len), + p_info.psdata_len, psdata); tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << KNAV_DMA_DESC_PSLEN_SHIFT; } @@ -1258,6 +1283,7 @@ out: 
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_stats *tx_stats = &netcp->stats; int subqueue = skb_get_queue_mapping(skb); struct knav_dma_desc *desc; int desc_count, ret = 0; @@ -1273,7 +1299,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* If we get here, the skb has already been dropped */ dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n", ret); - ndev->stats.tx_dropped++; + tx_stats->tx_dropped++; return ret; } skb->len = NETCP_MIN_PACKET_SIZE; @@ -1290,8 +1316,6 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (ret) goto drop; - netif_trans_update(ndev); - /* Check Tx pool count & stop subqueue if needed */ desc_count = knav_pool_count(netcp->tx_pool); if (desc_count < netcp->tx_pause_threshold) { @@ -1301,7 +1325,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; drop: - ndev->stats.tx_dropped++; + tx_stats->tx_dropped++; if (desc) netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc)); dev_kfree_skb(skb); @@ -1883,12 +1907,44 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return 0; } +static void +netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_stats *p = &netcp->stats; + u64 rxpackets, rxbytes, txpackets, txbytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&p->syncp_rx); + rxpackets = p->rx_packets; + rxbytes = p->rx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start)); + + do { + start = u64_stats_fetch_begin_irq(&p->syncp_tx); + txpackets = p->tx_packets; + txbytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start)); + + stats->rx_packets = rxpackets; + stats->rx_bytes = rxbytes; + stats->tx_packets = txpackets; + stats->tx_bytes = txbytes; + + /* The following are stored as 32 bit */ + stats->rx_errors = p->rx_errors; + stats->rx_dropped = p->rx_dropped; + stats->tx_dropped = p->tx_dropped; +} + static const struct net_device_ops netcp_netdev_ops = { .ndo_open = netcp_ndo_open, .ndo_stop = netcp_ndo_stop, .ndo_start_xmit = netcp_ndo_start_xmit, .ndo_set_rx_mode = netcp_set_rx_mode, .ndo_do_ioctl = netcp_ndo_ioctl, + .ndo_get_stats64 = netcp_get_stats, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = netcp_rx_add_vid, @@ -1935,6 +1991,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device, INIT_LIST_HEAD(&netcp->txhook_list_head); INIT_LIST_HEAD(&netcp->rxhook_list_head); INIT_LIST_HEAD(&netcp->addr_list); + u64_stats_init(&netcp->stats.syncp_rx); + u64_stats_init(&netcp->stats.syncp_tx); netcp->netcp_device = netcp_device; netcp->dev = netcp_device->device; netcp->ndev = ndev; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 7d9e36f66735..eece3e2eec14 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -81,7 +81,6 @@ #define GBENU_CPTS_OFFSET 0x1d000 #define GBENU_ALE_OFFSET 0x1e000 #define GBENU_HOST_PORT_NUM 0 -#define GBENU_NUM_ALE_ENTRIES 1024 #define GBENU_SGMII_MODULE_SIZE 0x100 /* 10G Ethernet SS defines */ @@ -103,7 +102,7 @@ #define XGBE10_ALE_OFFSET 0x700 #define XGBE10_HW_STATS_OFFSET 0x800 #define XGBE10_HOST_PORT_NUM 0 -#define XGBE10_NUM_ALE_ENTRIES 1024 +#define XGBE10_NUM_ALE_ENTRIES 2048 
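[Editorial aside, not part of the patch] The netcp changes above replace the plain netdev counters with a per-interface struct netcp_stats protected by u64_stats_sync: the RX/TX completion paths update the 64-bit counters inside u64_stats_update_begin()/_end(), and netcp_get_stats() reads them back with a u64_stats_fetch_begin_irq()/_retry_irq() loop. The retry loop matters because a 32-bit CPU cannot read a 64-bit counter atomically. Below is a minimal, single-threaded userspace sketch of that retry-snapshot idea; the names (struct stats64, stats_update, stats_read) are invented for illustration, and real concurrent code additionally needs the memory barriers that the kernel primitives supply.

#include <stdint.h>
#include <stdio.h>

struct stats64 {
	unsigned int seq;	/* even: stable, odd: writer in progress */
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

/* writer side: bump the sequence around every counter update */
static void stats_update(struct stats64 *s, uint64_t pkts, uint64_t bytes)
{
	s->seq++;		/* begin update (sequence becomes odd) */
	s->rx_packets += pkts;
	s->rx_bytes += bytes;
	s->seq++;		/* end update (sequence becomes even again) */
}

/* reader side: retry until an even, unchanged sequence brackets the reads */
static void stats_read(const struct stats64 *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct stats64 s = { 0 };
	uint64_t pkts, bytes;

	stats_update(&s, 1, 64);
	stats_update(&s, 1, 1500);
	stats_read(&s, &pkts, &bytes);
	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}

On 64-bit builds such a sequence counter can compile away entirely; the split syncp_rx/syncp_tx objects in struct netcp_stats also keep the RX and TX writers on separate cache lines, which is why the struct marks them ____cacheline_aligned_in_smp.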
#define GBE_TIMER_INTERVAL (HZ / 2) @@ -122,6 +121,7 @@ #define MACSL_FULLDUPLEX BIT(0) #define GBE_CTL_P0_ENABLE BIT(2) +#define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13) #define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf #define GBE_STATS_CD_SEL BIT(28) @@ -2313,7 +2313,6 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) dev_dbg(priv->dev, "phy found: id is: 0x%s\n", phydev_name(slave->phy)); phy_start(slave->phy); - phy_read_status(slave->phy); } return 0; } @@ -2821,7 +2820,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) struct netcp_intf *netcp = netdev_priv(ndev); struct gbe_slave *slave = gbe_intf->slave; int port_num = slave->port_num; - u32 reg; + u32 reg, val; int ret; reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver)); @@ -2851,7 +2850,12 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype)); /* Control register */ - writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); + val = GBE_CTL_P0_ENABLE; + if (IS_SS_ID_MU(gbe_dev)) { + val |= ETH_SW_CTL_P0_TX_CRC_REMOVE; + netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS; + } + writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control)); /* All statistics enabled and STAT AB visible by default */ writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs, @@ -2930,7 +2934,9 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, } slave->open = false; - slave->phy_node = of_parse_phandle(node, "phy-handle", 0); + if ((slave->link_interface == SGMII_LINK_MAC_PHY) || + (slave->link_interface == XGMII_LINK_MAC_PHY)) + slave->phy_node = of_parse_phandle(node, "phy-handle", 0); slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); if (slave->link_interface >= XGMII_LINK_MAC_PHY) @@ -3112,7 +3118,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, dev_dbg(dev, "phy found: id is: 0x%s\n", phydev_name(slave->phy)); phy_start(slave->phy); - phy_read_status(slave->phy); } } } @@ -3433,7 +3438,6 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET; gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBENU_HOST_PORT_NUM; - gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ @@ -3601,7 +3605,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT; ale_params.ale_entries = gbe_dev->ale_entries; ale_params.ale_ports = gbe_dev->ale_ports; - + if (IS_SS_ID_MU(gbe_dev)) { + ale_params.major_ver_mask = 0x7; + ale_params.nu_switch_ale = true; + } gbe_dev->ale = cpsw_ale_create(&ale_params); if (!gbe_dev->ale) { dev_err(gbe_dev->dev, "error initializing ale engine\n"); diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 2255f9a6f3bc..7c634bc75615 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -681,7 +681,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } /* There are no packets left. */ - napi_complete(&info_mpipe->napi); + napi_complete_done(&info_mpipe->napi, work); md = &mpipe_data[instance]; /* Re-enable hypervisor interrupts. 
*/ diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 0a3b7dafa3ba..49ccee4b9aec 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } } - napi_complete(&info->napi); + napi_complete_done(&info->napi, work); if (!priv->active) goto done; @@ -2047,8 +2047,8 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) * * Returns the address of the device statistics structure. */ -static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void tile_net_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tile_net_priv *priv = netdev_priv(dev); u64 rx_packets = 0, tx_packets = 0; @@ -2090,12 +2090,8 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, stats->tx_bytes = tx_bytes; stats->rx_errors = rx_errors; stats->rx_dropped = rx_dropped; - - return stats; } - - /* * Change the Ethernet Address of the NIC. * diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 345316c749e7..72013314bba8 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget) } if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); gelic_card_rx_irq_on(card); } return packets_done; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index cb341dfe65ad..cec9e70ab995 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget) /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); spider_net_rx_irq_on(card); card->ignore_rx_ramfull = 0; } diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 3be61ed28741..a45f98fa4aa7 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1638,7 +1638,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget) spin_unlock(&lp->rx_lock); if (received < budget) { - napi_complete(napi); + napi_complete_done(napi, received); /* enable interrupts */ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index f153ad729ce5..c5583991da4a 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget) if (num_received < budget) { data->rxpending = 0; - napi_complete(napi); + napi_complete_done(napi, num_received); TSI_WRITE(TSI108_EC_INTMASK, TSI_READ(TSI108_EC_INTMASK) diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 0a6c4e804eed..c068c58428f7 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -513,8 +513,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance); static void rhine_tx(struct net_device *dev); static int rhine_rx(struct net_device *dev, int 
limit); static void rhine_set_rx_mode(struct net_device *dev); -static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void rhine_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); @@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); iowrite16(enable_mask, ioaddr + IntrEnable); mmiowb(); } @@ -2221,7 +2221,7 @@ out_unlock: mutex_unlock(&rp->task_lock); } -static struct rtnl_link_stats64 * +static void rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rhine_private *rp = netdev_priv(dev); @@ -2244,8 +2244,6 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = rp->tx_stats.packets; stats->tx_bytes = rp->tx_stats.bytes; } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); - - return stats; } static void rhine_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 4716e60e2ccb..d088788b27a7 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_struct *napi, int budget) velocity_tx_srv(vptr); /* If budget not fully consumed, exit the polling mode */ if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); mac_enable_int(vptr->mac_regs); } spin_unlock_irqrestore(&vptr->lock, flags); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index e1296ef2cf66..f90267f0519f 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5100_enable_intr(priv); } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 724fabd38a23..56ae573001e8 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5300_write(priv, W5300_IMR, IR_S0); mmiowb(); } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index aa02a03a6d8d..69e31ceccfae 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1041,20 +1041,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) } /** - * xemaclite_remove_ndev - Free the network device - * @ndev: Pointer to the network device to be freed - * - * This function un maps the IO region of the Emaclite device and frees the net - * device. 
- */ -static void xemaclite_remove_ndev(struct net_device *ndev) -{ - if (ndev) { - free_netdev(ndev); - } -} - -/** * get_bool - Get a parameter from the OF device * @ofdev: Pointer to OF device structure * @s: Property to be retrieved @@ -1077,7 +1063,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s) } } -static struct net_device_ops xemaclite_netdev_ops; +static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. @@ -1184,7 +1170,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) return 0; error: - xemaclite_remove_ndev(ndev); + free_netdev(ndev); return rc; } @@ -1216,7 +1202,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev) of_node_put(lp->phy_node); lp->phy_node = NULL; - xemaclite_remove_ndev(ndev); + free_netdev(ndev); return 0; } @@ -1231,7 +1217,7 @@ xemaclite_poll_controller(struct net_device *ndev) } #endif -static struct net_device_ops xemaclite_netdev_ops = { +static const struct net_device_ops xemaclite_netdev_ops = { .ndo_open = xemaclite_open, .ndo_stop = xemaclite_close, .ndo_start_xmit = xemaclite_send, |
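[Editorial aside, not part of the patch] A change repeated across this series (typhoon, davinci_emac, tilegx/tilepro, gelic, spider_net, tc35815, tsi108, via-rhine, via-velocity, w5100/w5300) is the conversion of napi_complete(napi) to napi_complete_done(napi, work_done): when a poll cycle ends below its budget, the driver now also reports how much work it actually did, which the core can use, together with gro_flush_timeout, to defer GRO flushes and moderate interrupts. A rough userspace model of the budget accounting follows; fetch_packet() and poll() are invented stand-ins for the driver's RX processing and NAPI callback, not kernel interfaces.

#include <stdio.h>

/* stand-in for "pull one completed packet from the ring, if any" */
static int fetch_packet(int *backlog)
{
	if (*backlog > 0) {
		(*backlog)--;
		return 1;
	}
	return 0;
}

/* model of a poll callback: handle at most @budget packets and report
 * the number actually handled; if the ring drained before the budget
 * was used up, leave polling mode and re-enable the device interrupt
 * (the point where the drivers above call napi_complete_done())
 */
static int poll(int *backlog, int budget, int *irq_enabled)
{
	int work_done = 0;

	while (work_done < budget && fetch_packet(backlog))
		work_done++;

	if (work_done < budget)
		*irq_enabled = 1;

	return work_done;
}

int main(void)
{
	int backlog = 10, irq_enabled = 0, done;

	done = poll(&backlog, 64, &irq_enabled);
	printf("work_done=%d irq_enabled=%d backlog=%d\n",
	       done, irq_enabled, backlog);
	return 0;
}

Returning exactly budget (and not completing the NAPI context) keeps the driver in polling mode; returning less signals that the ring is empty, which is why the precise work_done value is worth passing along.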