| author | Ingo Molnar <mingo@kernel.org> | 2019-12-10 10:11:00 +0100 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@kernel.org> | 2019-12-10 10:11:00 +0100 |
| commit | 2040cf9f59037aa8aec749363e69ead165b67b43 (patch) | |
| tree | e9c15448e841cc493bc80b9f658d7955623e86dd /drivers/net/ethernet | |
| parent | kprobes: Set unoptimized flag after unoptimizing code (diff) | |
| parent | Linux 5.5-rc1 (diff) | |
| download | linux-2040cf9f59037aa8aec749363e69ead165b67b43.tar.xz linux-2040cf9f59037aa8aec749363e69ead165b67b43.zip | |
Merge tag 'v5.5-rc1' into core/kprobes, to resolve conflicts
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/net/ethernet')
38 files changed, 456 insertions, 275 deletions
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index d5ae2e1e0b0e..9c767ee252ac 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4422,6 +4422,7 @@ static int macb_remove(struct platform_device *pdev) mdiobus_free(bp->mii_bus); unregister_netdev(dev); + tasklet_kill(&bp->hresp_err_tasklet); pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 1e09fdb63c4f..c4f6ec0cd183 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1115,7 +1115,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) phy_interface_mode(lmac->lmac_type))) return -ENODEV; - phy_start_aneg(lmac->phydev); + phy_start(lmac->phydev); return 0; } diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig index 17d300ea9955..f51dca1526c6 100644 --- a/drivers/net/ethernet/emulex/benet/Kconfig +++ b/drivers/net/ethernet/emulex/benet/Kconfig @@ -49,4 +49,4 @@ config BE2NET_SKYHAWK comment "WARNING: be2net is useless without any enabled chip" depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \ - BE2NET_SKYHAWK=n && BE2NET + BE2NET_SKYHAWK=n && BE2NET diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index a6f2063f1475..8ed85037f021 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1858,7 +1858,7 @@ static int ftgmac100_probe(struct platform_device *pdev) } /* Indicate that we support PAUSE frames (see comment in - * Documentation/networking/phy.txt) + * Documentation/networking/phy.rst) */ phy_support_asym_pause(phy); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 9db1b96ed9b9..17739906c966 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1332,6 +1332,7 @@ static int enetc_phy_connect(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct phy_device *phydev; + struct ethtool_eee edata; if (!priv->phy_node) return 0; /* phy-less mode */ @@ -1345,6 +1346,10 @@ static int enetc_phy_connect(struct net_device *ndev) phy_attached_info(phydev); + /* disable EEE autoneg, until ENETC driver supports it */ + memset(&edata, 0, sizeof(struct ethtool_eee)); + phy_ethtool_set_eee(phydev, &edata); + return 0; } diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index aca95f64bde8..9b7a8db9860f 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, } qpl->id = id; - qpl->num_entries = pages; + qpl->num_entries = 0; qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); /* caller handles clean up */ if (!qpl->pages) @@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, /* caller handles clean up */ if (err) return -ENOMEM; + qpl->num_entries++; } priv->num_registered_pages += pages; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ba0536802b13..69545dd6c938 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1287,30 +1287,25 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, } static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, - struct sk_buff **out_skb) + struct net_device *netdev, + struct sk_buff *skb) { + struct hns3_nic_priv *priv = netdev_priv(netdev); unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; - struct sk_buff *skb = *out_skb; unsigned int bd_num; bd_num = hns3_tx_bd_num(skb, bd_size); if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { - struct sk_buff *new_skb; - if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && !hns3_skb_need_linearized(skb, bd_size, bd_num)) goto out; - /* manual split the send packet */ - new_skb = skb_copy(skb, GFP_ATOMIC); - if (!new_skb) + if (__skb_linearize(skb)) return -ENOMEM; - dev_kfree_skb_any(skb); - *out_skb = new_skb; - bd_num = hns3_tx_bd_count(new_skb->len); - if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || - (!skb_is_gso(new_skb) && + bd_num = hns3_tx_bd_count(skb->len); + if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || + (!skb_is_gso(skb) && bd_num > HNS3_MAX_NON_TSO_BD_NUM)) return -ENOMEM; @@ -1320,10 +1315,23 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, } out: - if (unlikely(ring_space(ring) < bd_num)) - return -EBUSY; + if (likely(ring_space(ring) >= bd_num)) + return bd_num; - return bd_num; + netif_stop_subqueue(netdev, ring->queue_index); + smp_mb(); /* Memory barrier before checking ring_space */ + + /* Start queue in case hns3_clean_tx_ring has just made room + * available and has not seen the queue stopped state performed + * by netif_stop_subqueue above. + */ + if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netif_start_subqueue(netdev, ring->queue_index); + return bd_num; + } + + return -EBUSY; } static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) @@ -1400,13 +1408,13 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) /* Prefetch the data used later */ prefetch(skb->data); - ret = hns3_nic_maybe_stop_tx(ring, &skb); + ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); if (unlikely(ret <= 0)) { if (ret == -EBUSY) { u64_stats_update_begin(&ring->syncp); ring->stats.tx_busy++; u64_stats_update_end(&ring->syncp); - goto out_net_tx_busy; + return NETDEV_TX_BUSY; } else if (ret == -ENOMEM) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; @@ -1457,12 +1465,6 @@ fill_err: out_err_tx_ok: dev_kfree_skb_any(skb); return NETDEV_TX_OK; - -out_net_tx_busy: - netif_stop_subqueue(netdev, ring->queue_index); - smp_mb(); /* Commit all data before submit */ - - return NETDEV_TX_BUSY; } static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) @@ -2519,7 +2521,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring) dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); - if (unlikely(pkts && netif_carrier_ok(netdev) && + if (unlikely(netif_carrier_ok(netdev) && ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 7c7038676d6d..d862e9ba27e1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -8438,13 +8438,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, if (hdev->pdev->revision == 0x20) return -EOPNOTSUPP; + vport = hclge_get_vf_vport(hdev, vfid); + if (!vport) + return -EINVAL; + /* qos is a 3 bits value, so can not be bigger than 7 */ - if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7) + if (vlan > VLAN_N_VID - 1 || qos > 7) return -EINVAL; if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - vport = &hdev->vport[vfid]; state = hclge_get_port_base_vlan_state(vport, vport->port_base_vlan_cfg.state, vlan); @@ -8455,21 +8458,12 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, vlan_info.qos = qos; vlan_info.vlan_proto = ntohs(proto); - /* update port based VLAN for PF */ - if (!vfid) { - hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); - ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); - hclge_notify_client(hdev, HNAE3_UP_CLIENT); - - return ret; - } - if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { return hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); } else { ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], - (u8)vfid, state, + vport->vport_id, state, vlan, qos, ntohs(proto)); return ret; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0686ded7ad3a..c90080781924 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -159,6 +159,40 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, return rc; } +/** + * ibmvnic_wait_for_completion - Check device state and wait for completion + * @adapter: private device data + * @comp_done: completion structure to wait for + * @timeout: time to wait in milliseconds + * + * Wait for a completion signal or until the timeout limit is reached + * while checking that the device is still active. 
+ */ +static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, + struct completion *comp_done, + unsigned long timeout) +{ + struct net_device *netdev; + unsigned long div_timeout; + u8 retry; + + netdev = adapter->netdev; + retry = 5; + div_timeout = msecs_to_jiffies(timeout / retry); + while (true) { + if (!adapter->crq.active) { + netdev_err(netdev, "Device down!\n"); + return -ENODEV; + } + if (retry--) + break; + if (wait_for_completion_timeout(comp_done, div_timeout)) + return 0; + } + netdev_err(netdev, "Operation timed out.\n"); + return -ETIMEDOUT; +} + static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { @@ -176,21 +210,35 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, ltb->map_id = adapter->map_id; adapter->map_id++; - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); if (rc) { dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_err(dev, + "Long term map request aborted or timed out,rc = %d\n", + rc); + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + mutex_unlock(&adapter->fw_lock); return rc; } - wait_for_completion(&adapter->fw_done); if (adapter->fw_done_rc) { dev_err(dev, "Couldn't map long term buffer,rc = %d\n", adapter->fw_done_rc); dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + mutex_unlock(&adapter->fw_lock); return -1; } + mutex_unlock(&adapter->fw_lock); return 0; } @@ -211,22 +259,37 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, static int reset_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb) { + struct device *dev = &adapter->vdev->dev; int rc; memset(ltb->buff, 0, ltb->size); - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + + reinit_completion(&adapter->fw_done); rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); - if (rc) + if (rc) { + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_info(dev, + "Reset failed, long term map request timed out or aborted\n"); + mutex_unlock(&adapter->fw_lock); return rc; - wait_for_completion(&adapter->fw_done); + } if (adapter->fw_done_rc) { - dev_info(&adapter->vdev->dev, + dev_info(dev, "Reset failed, attempting to free and reallocate buffer\n"); free_long_term_buff(adapter, ltb); + mutex_unlock(&adapter->fw_lock); return alloc_long_term_buff(adapter, ltb, ltb->size); } + mutex_unlock(&adapter->fw_lock); return 0; } @@ -943,13 +1006,25 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) if (adapter->vpd->buff) len = adapter->vpd->len; - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); + crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; crq.get_vpd_size.cmd = GET_VPD_SIZE; rc = ibmvnic_send_crq(adapter, &crq); - if (rc) + if (rc) { + mutex_unlock(&adapter->fw_lock); return rc; - wait_for_completion(&adapter->fw_done); + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc); + mutex_unlock(&adapter->fw_lock); + return rc; + } 
+ mutex_unlock(&adapter->fw_lock); if (!adapter->vpd->len) return -ENODATA; @@ -976,7 +1051,10 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) return -ENOMEM; } + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; reinit_completion(&adapter->fw_done); + crq.get_vpd.first = IBMVNIC_CRQ_CMD; crq.get_vpd.cmd = GET_VPD; crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); @@ -985,10 +1063,20 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) if (rc) { kfree(adapter->vpd->buff); adapter->vpd->buff = NULL; + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc); + kfree(adapter->vpd->buff); + adapter->vpd->buff = NULL; + mutex_unlock(&adapter->fw_lock); return rc; } - wait_for_completion(&adapter->fw_done); + mutex_unlock(&adapter->fw_lock); return 0; } @@ -1689,20 +1777,25 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); + rc = ibmvnic_send_crq(adapter, &crq); if (rc) { rc = -EIO; + mutex_unlock(&adapter->fw_lock); goto err; } - wait_for_completion(&adapter->fw_done); + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); /* netdev->dev_addr is changed in handle_change_mac_rsp function */ - if (adapter->fw_done_rc) { + if (rc || adapter->fw_done_rc) { rc = -EIO; + mutex_unlock(&adapter->fw_lock); goto err; } - + mutex_unlock(&adapter->fw_lock); return 0; err: ether_addr_copy(adapter->mac_addr, netdev->dev_addr); @@ -2316,12 +2409,19 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; - init_completion(&adapter->reset_done); + reinit_completion(&adapter->reset_done); adapter->wait_for_reset = true; rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); - if (rc) - return rc; - wait_for_completion(&adapter->reset_done); + + if (rc) { + ret = rc; + goto out; + } + rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); + if (rc) { + ret = -ENODEV; + goto out; + } ret = 0; if (adapter->reset_done_rc) { @@ -2332,13 +2432,21 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) adapter->desired.rx_entries = adapter->fallback.rx_entries; adapter->desired.tx_entries = adapter->fallback.tx_entries; - init_completion(&adapter->reset_done); + reinit_completion(&adapter->reset_done); adapter->wait_for_reset = true; rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); - if (rc) - return ret; - wait_for_completion(&adapter->reset_done); + if (rc) { + ret = rc; + goto out; + } + rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, + 60000); + if (rc) { + ret = -ENODEV; + goto out; + } } +out: adapter->wait_for_reset = false; return ret; @@ -2603,11 +2711,13 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, cpu_to_be32(sizeof(struct ibmvnic_statistics)); /* Wait for data to be written */ - init_completion(&adapter->stats_done); + reinit_completion(&adapter->stats_done); rc = ibmvnic_send_crq(adapter, &crq); if (rc) return; - wait_for_completion(&adapter->stats_done); + rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); + if (rc) + return; for (i = 
0; i < ARRAY_SIZE(ibmvnic_stats); i++) data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, @@ -4408,11 +4518,24 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter) memset(&crq, 0, sizeof(crq)); crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; - init_completion(&adapter->fw_done); + + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); + rc = ibmvnic_send_crq(adapter, &crq); - if (rc) + if (rc) { + mutex_unlock(&adapter->fw_lock); return rc; - wait_for_completion(&adapter->fw_done); + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + mutex_unlock(&adapter->fw_lock); + return rc; + } + + mutex_unlock(&adapter->fw_lock); return adapter->fw_done_rc ? -EIO : 0; } @@ -4505,6 +4628,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, case IBMVNIC_CRQ_XPORT_EVENT: netif_carrier_off(netdev); adapter->crq.active = false; + /* terminate any thread waiting for a response + * from the device + */ + if (!completion_done(&adapter->fw_done)) { + adapter->fw_done_rc = -EIO; + complete(&adapter->fw_done); + } + if (!completion_done(&adapter->stats_done)) + complete(&adapter->stats_done); if (test_bit(0, &adapter->resetting)) adapter->force_reset_recovery = true; if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { @@ -4959,7 +5091,11 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) __ibmvnic_delayed_reset); INIT_LIST_HEAD(&adapter->rwi_list); spin_lock_init(&adapter->rwi_lock); + mutex_init(&adapter->fw_lock); init_completion(&adapter->init_done); + init_completion(&adapter->fw_done); + init_completion(&adapter->reset_done); + init_completion(&adapter->stats_done); clear_bit(0, &adapter->resetting); do { @@ -5017,6 +5153,7 @@ ibmvnic_stats_fail: ibmvnic_init_fail: release_sub_crqs(adapter, 1); release_crq_queue(adapter); + mutex_destroy(&adapter->fw_lock); free_netdev(netdev); return rc; @@ -5041,6 +5178,7 @@ static int ibmvnic_remove(struct vio_dev *dev) adapter->state = VNIC_REMOVED; rtnl_unlock(); + mutex_destroy(&adapter->fw_lock); device_remove_file(&dev->dev, &dev_attr_failover); free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index ebc39248b334..60eccaf91b12 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1026,6 +1026,8 @@ struct ibmvnic_adapter { int init_done_rc; struct completion fw_done; + /* Used for serialization of device commands */ + struct mutex fw_lock; int fw_done_rc; struct completion reset_done; diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index c40729b2c184..7fad2f24dcad 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -45,7 +45,6 @@ #define BAR_0 0 #define BAR_1 1 -#define BAR_5 5 #define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 416da9619928..aca97b084003 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -977,7 +977,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; if (adapter->need_ioport) { - for (i = BAR_1; i <= BAR_5; i++) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) 
== 0) continue; if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h index e85271b68410..681d44cc9784 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -42,7 +42,6 @@ #define BAR_0 0 #define BAR_1 1 -#define BAR_5 5 struct ixgb_adapter; #include "ixgb_hw.h" diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 0940a0da16f2..3d8c051dd327 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -412,7 +412,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; } - for (i = BAR_1; i <= BAR_5; i++) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) continue; if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f1a7bc46f1c0..2c16add0b642 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -816,7 +816,7 @@ struct mlx5e_xsk { struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; - int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; + int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index f777994f3005..fce6eccdcf8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -73,6 +73,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000, [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000, [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, + [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, [MLX5E_400GAUI_8] = 400000, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 7b672ada63a3..ae99fac08b53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -155,8 +155,11 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { + pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", + i, port_buffer->buffer[i].size); return -ENOMEM; + } port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; port_buffer->buffer[i].xon = @@ -232,6 +235,26 @@ static int update_buffer_lossy(unsigned int max_mtu, return 0; } +static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en) +{ + u32 g_rx_pause, g_tx_pause; + int err; + + err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause); + if (err) + return err; + + /* If global pause enabled, set all active buffers to lossless. + * Otherwise, check PFC setting. 
+ */ + if (g_rx_pause || g_tx_pause) + *pfc_en = 0xff; + else + err = mlx5_query_port_pfc(mdev, pfc_en, NULL); + + return err; +} + #define MINIMUM_MAX_MTU 9216 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, @@ -277,7 +300,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { update_prio2buffer = true; - err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL); + err = fill_pfc_en(priv->mdev, &curr_pfc_en); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 784b1e26f414..af4ebd2951b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -130,42 +130,6 @@ static const char *mlx5e_netdev_kind(struct net_device *dev) return "unknown"; } -static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, - struct net_device *mirred_dev, - struct net_device **out_dev, - struct net_device **route_dev, - struct flowi6 *fl6, - struct neighbour **out_n, - u8 *out_ttl) -{ - struct dst_entry *dst; - struct neighbour *n; - - int ret; - - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, - fl6); - if (ret < 0) - return ret; - - if (!(*out_ttl)) - *out_ttl = ip6_dst_hoplimit(dst); - - ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); - if (ret < 0) { - dst_release(dst); - return ret; - } - - n = dst_neigh_lookup(dst, &fl6->daddr); - dst_release(dst); - if (!n) - return -ENOMEM; - - *out_n = n; - return 0; -} - static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto, struct mlx5e_encap_entry *e) { @@ -319,6 +283,43 @@ release_neigh: return err; } +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) +static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct net_device **out_dev, + struct net_device **route_dev, + struct flowi6 *fl6, + struct neighbour **out_n, + u8 *out_ttl) +{ + struct dst_entry *dst; + struct neighbour *n; + + int ret; + + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, + NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); + + if (!(*out_ttl)) + *out_ttl = ip6_dst_hoplimit(dst); + + ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); + if (ret < 0) { + dst_release(dst); + return ret; + } + + n = dst_neigh_lookup(dst, &fl6->daddr); + dst_release(dst); + if (!n) + return -ENOMEM; + + *out_n = n; + return 0; +} + int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5e_encap_entry *e) @@ -328,7 +329,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, struct net_device *out_dev, *route_dev; struct flowi6 fl6 = {}; struct ipv6hdr *ip6h; - struct neighbour *n; + struct neighbour *n = NULL; int ipv6_encap_size; char *encap_header; u8 nud_state, ttl; @@ -436,6 +437,7 @@ release_neigh: neigh_release(n); return err; } +#endif bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, struct net_device *netdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 95601269fa2e..c6776f308d5e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1027,18 +1027,11 @@ static bool ext_link_mode_requested(const unsigned long *adver) return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static bool 
ext_speed_requested(u32 speed) -{ -#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000 - return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED); -} - -static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed) +static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported) { bool ext_link_mode = ext_link_mode_requested(adver); - bool ext_speed = ext_speed_requested(speed); - return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed; + return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported; } int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, @@ -1065,8 +1058,8 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, autoneg = link_ksettings->base.autoneg; speed = link_ksettings->base.speed; - ext = ext_requested(autoneg, adver, speed), ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = ext_requested(autoneg, adver, ext_supported); if (!ext_supported && ext) return -EOPNOTSUPP; @@ -1643,7 +1636,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, break; case MLX5_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; default: netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 09ed7f5f688b..4980e80a5e85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1691,11 +1691,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - struct mlx5e_priv *priv = c->priv; int err, tc; for (tc = 0; tc < params->num_tc; tc++) { - int txq_ix = c->ix + tc * priv->max_nch; + int txq_ix = c->ix + tc * params->num_channels; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, params, &cparam->sq, &c->sq[tc], tc); @@ -2876,26 +2875,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_tc_queue(netdev, tc, nch, 0); } -static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) +static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) { - int i, tc; + int i, ch; - for (i = 0; i < priv->max_nch; i++) - for (tc = 0; tc < priv->profile->max_tc; tc++) - priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch; -} + ch = priv->channels.num; -static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) -{ - struct mlx5e_channel *c; - struct mlx5e_txqsq *sq; - int i, tc; + for (i = 0; i < ch; i++) { + int tc; + + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + struct mlx5e_channel *c = priv->channels.c[i]; + struct mlx5e_txqsq *sq = &c->sq[tc]; - for (i = 0; i < priv->channels.num; i++) { - c = priv->channels.c[i]; - for (tc = 0; tc < c->num_tc; tc++) { - sq = &c->sq[tc]; priv->txq2sq[sq->txq_ix] = sq; + priv->channel_tc2realtxq[i][tc] = i + tc * ch; } } } @@ -2910,7 +2904,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, num_rxqs); - mlx5e_build_tx2sq_maps(priv); + mlx5e_build_txq_maps(priv); mlx5e_activate_channels(&priv->channels); mlx5e_xdp_tx_enable(priv); netif_tx_start_all_queues(priv->netdev); @@ -5021,7 +5015,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); - mlx5e_build_tc2txq_maps(priv); 
mlx5e_health_create_reporters(priv); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 7e6ebd0505cc..9f09253f9f46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -1601,7 +1601,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, sq_stats_desc[j].format, - priv->channel_tc2txq[i][tc]); + i + tc * max_nch); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0d5d84b5fa23..9b32a9c0f497 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1626,8 +1626,11 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) flow_flag_clear(flow, DUP); - mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); - kvfree(flow->peer_flow); + if (refcount_dec_and_test(&flow->peer_flow->refcnt)) { + mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); + kfree(flow->peer_flow); + } + flow->peer_flow = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 66951ff975f4..2565ba8692d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -93,7 +93,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, if (txq_ix >= num_channels) txq_ix = priv->txq2sq[txq_ix]->ch_ix; - return priv->channel_tc2txq[txq_ix][up]; + return priv->channel_tc2realtxq[txq_ix][up]; } static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 962888a7c3c9..ffcff3ba3701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -81,7 +81,14 @@ struct vport_ingress { struct mlx5_fc *drop_counter; } legacy; struct { - struct mlx5_flow_group *metadata_grp; + /* Optional group to add an FTE to do internal priority + * tagging on ingress packets. + */ + struct mlx5_flow_group *metadata_prio_tag_grp; + /* Group to add default match-all FTE entry to tag ingress + * packet with metadata. 
+ */ + struct mlx5_flow_group *metadata_allmatch_grp; struct mlx5_modify_hdr *modify_metadata; struct mlx5_flow_handle *modify_metadata_rule; } offloads; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 8ba59a21a163..243a5440867e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -88,6 +88,14 @@ u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) return 1; } +static bool +esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, + const struct mlx5_vport *vport) +{ + return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && + mlx5_eswitch_is_vf_vport(esw, vport->vport)); +} + static void mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, @@ -1760,12 +1768,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, * required, allow * Unmatched traffic is allowed by default */ - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) { - err = -ENOMEM; - goto out_no_mem; - } + if (!spec) + return -ENOMEM; /* Untagged packets - push prio tag VLAN, allow */ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); @@ -1791,14 +1796,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, "vport[%d] configure ingress untagged allow rule, err(%d)\n", vport->vport, err); vport->ingress.allow_rule = NULL; - goto out; } -out: kvfree(spec); -out_no_mem: - if (err) - esw_vport_cleanup_ingress_rules(esw, vport); return err; } @@ -1836,13 +1836,9 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, esw_warn(esw->dev, "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", vport->vport, err); + mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); vport->ingress.offloads.modify_metadata_rule = NULL; - goto out; } - -out: - if (err) - mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); return err; } @@ -1862,50 +1858,103 @@ static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw, { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *g; + void *match_criteria; u32 *flow_group_in; + u32 flow_index = 0; int ret = 0; flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + if (esw_check_ingress_prio_tag_enabled(esw, vport)) { + /* This group is to hold FTE to match untagged packets when prio_tag + * is enabled. 
+ */ + memset(flow_group_in, 0, inlen); - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); - if (IS_ERR(g)) { - ret = PTR_ERR(g); - esw_warn(esw->dev, - "Failed to create vport[%d] ingress metadata group, err(%d)\n", - vport->vport, ret); - goto grp_err; + match_criteria = MLX5_ADDR_OF(create_flow_group_in, + flow_group_in, match_criteria); + MLX5_SET(create_flow_group_in, flow_group_in, + match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + ret = PTR_ERR(g); + esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n", + vport->vport, ret); + goto prio_tag_err; + } + vport->ingress.offloads.metadata_prio_tag_grp = g; + flow_index++; + } + + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { + /* This group holds an FTE with no matches for add metadata for + * tagged packets, if prio-tag is enabled (as a fallthrough), + * or all traffic in case prio-tag is disabled. + */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + ret = PTR_ERR(g); + esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n", + vport->vport, ret); + goto metadata_err; + } + vport->ingress.offloads.metadata_allmatch_grp = g; + } + + kvfree(flow_group_in); + return 0; + +metadata_err: + if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) { + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); + vport->ingress.offloads.metadata_prio_tag_grp = NULL; } - vport->ingress.offloads.metadata_grp = g; -grp_err: +prio_tag_err: kvfree(flow_group_in); return ret; } static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport) { - if (vport->ingress.offloads.metadata_grp) { - mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp); - vport->ingress.offloads.metadata_grp = NULL; + if (vport->ingress.offloads.metadata_allmatch_grp) { + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp); + vport->ingress.offloads.metadata_allmatch_grp = NULL; + } + + if (vport->ingress.offloads.metadata_prio_tag_grp) { + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); + vport->ingress.offloads.metadata_prio_tag_grp = NULL; } } static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + int num_ftes = 0; int err; if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && - !MLX5_CAP_GEN(esw->dev, prio_tag_required)) + !esw_check_ingress_prio_tag_enabled(esw, vport)) return 0; esw_vport_cleanup_ingress_rules(esw, vport); - err = esw_vport_create_ingress_acl_table(esw, vport, 1); + + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) + num_ftes++; + if (esw_check_ingress_prio_tag_enabled(esw, vport)) + num_ftes++; + + err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes); if (err) { esw_warn(esw->dev, "failed to enable ingress acl (%d) on vport[%d]\n", @@ -1926,8 +1975,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, goto metadata_err; } - if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && - 
mlx5_eswitch_is_vf_vport(esw, vport->vport)) { + if (esw_check_ingress_prio_tag_enabled(esw, vport)) { err = esw_vport_ingress_prio_tag_config(esw, vport); if (err) goto prio_tag_err; @@ -1937,7 +1985,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, prio_tag_err: esw_vport_del_ingress_acl_modify_metadata(esw, vport); metadata_err: - esw_vport_cleanup_ingress_rules(esw, vport); esw_vport_destroy_ingress_acl_group(vport); group_err: esw_vport_destroy_ingress_acl_table(vport); @@ -2008,8 +2055,9 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { err = esw_vport_egress_config(esw, vport); if (err) { - esw_vport_del_ingress_acl_modify_metadata(esw, vport); esw_vport_cleanup_ingress_rules(esw, vport); + esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_destroy_ingress_acl_group(vport); esw_vport_destroy_ingress_acl_table(vport); } } @@ -2021,8 +2069,8 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { esw_vport_disable_egress_acl(esw, vport); - esw_vport_del_ingress_acl_modify_metadata(esw, vport); esw_vport_cleanup_ingress_rules(esw, vport); + esw_vport_del_ingress_acl_modify_metadata(esw, vport); esw_vport_destroy_ingress_acl_group(vport); esw_vport_destroy_ingress_acl_table(vport); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 584074bbf669..173e2c12e1c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -837,8 +837,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) mlx5_init_qp_table(dev); - mlx5_init_mkey_table(dev); - mlx5_init_reserved_gids(dev); mlx5_init_clock(dev); @@ -896,7 +894,6 @@ err_rl_cleanup: err_tables_cleanup: mlx5_geneve_destroy(dev->geneve); mlx5_vxlan_destroy(dev->vxlan); - mlx5_cleanup_mkey_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_events_cleanup(dev); @@ -924,7 +921,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_clock(dev); mlx5_cleanup_reserved_gids(dev); - mlx5_cleanup_mkey_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_events_cleanup(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index c501bf2a0252..42cc3c7ac5b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -36,16 +36,6 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" -void mlx5_init_mkey_table(struct mlx5_core_dev *dev) -{ - xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ); -} - -void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev) -{ - WARN_ON(!xa_empty(&dev->priv.mkey_table)); -} - int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, struct mlx5_async_ctx *async_ctx, u32 *in, @@ -54,7 +44,6 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, struct mlx5_async_work *context) { u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0}; - struct xarray *mkeys = &dev->priv.mkey_table; u32 mkey_index; void *mkc; int err; @@ -84,16 +73,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", mkey_index, key, mkey->key); - - err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey, - GFP_KERNEL)); - if (err) { - mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n", - 
mlx5_base_mkey(mkey->key), err); - mlx5_core_destroy_mkey(dev, mkey); - } - - return err; + return 0; } EXPORT_SYMBOL(mlx5_core_create_mkey_cb); @@ -111,12 +91,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, { u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0}; - struct xarray *mkeys = &dev->priv.mkey_table; - unsigned long flags; - - xa_lock_irqsave(mkeys, flags); - __xa_erase(mkeys, mlx5_base_mkey(mkey->key)); - xa_unlock_irqrestore(mkeys, flags); MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 0e96ffab3b05..985b46d7e3d1 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -583,18 +583,10 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP && ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { - struct ocelot_skb *oskb = - kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC); - - if (unlikely(!oskb)) - return -ENOMEM; - shinfo->tx_flags |= SKBTX_IN_PROGRESS; - - oskb->skb = skb; - oskb->id = ocelot_port->ts_id % 4; - - list_add_tail(&oskb->head, &ocelot_port->skbs); + /* Store timestamp ID in cb[0] of sk_buff */ + skb->cb[0] = ocelot_port->ts_id % 4; + skb_queue_tail(&ocelot_port->tx_skbs, skb); return 0; } return -ENODATA; @@ -704,12 +696,11 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) int budget = OCELOT_PTP_QUEUE_SZ; while (budget--) { + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; struct skb_shared_hwtstamps shhwtstamps; - struct list_head *pos, *tmp; - struct sk_buff *skb = NULL; - struct ocelot_skb *entry; struct ocelot_port *port; struct timespec64 ts; + unsigned long flags; u32 val, id, txport; val = ocelot_read(ocelot, SYS_PTP_STATUS); @@ -727,21 +718,22 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) /* Retrieve its associated skb */ port = ocelot->ports[txport]; - list_for_each_safe(pos, tmp, &port->skbs) { - entry = list_entry(pos, struct ocelot_skb, head); - if (entry->id != id) - continue; - - skb = entry->skb; + spin_lock_irqsave(&port->tx_skbs.lock, flags); - list_del(pos); - kfree(entry); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (skb->cb[0] != id) + continue; + __skb_unlink(skb, &port->tx_skbs); + skb_match = skb; + break; } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); + /* Next ts */ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); - if (unlikely(!skb)) + if (unlikely(!skb_match)) continue; /* Get the h/w timestamp */ @@ -750,9 +742,9 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) /* Set the timestamp into the skb */ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); - skb_tstamp_tx(skb, &shhwtstamps); + skb_tstamp_tx(skb_match, &shhwtstamps); - dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb_match); } } EXPORT_SYMBOL(ocelot_get_txtstamp); @@ -2157,14 +2149,18 @@ static struct ptp_clock_info ocelot_ptp_clock_info = { static int ocelot_init_timestamp(struct ocelot *ocelot) { + struct ptp_clock *ptp_clock; + ocelot->ptp_info = ocelot_ptp_clock_info; - ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); - if (IS_ERR(ocelot->ptp_clock)) - return PTR_ERR(ocelot->ptp_clock); + ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); + if (IS_ERR(ptp_clock)) + return PTR_ERR(ptp_clock); /* Check if PHC support is 
missing at the configuration level */ - if (!ocelot->ptp_clock) + if (!ptp_clock) return 0; + ocelot->ptp_clock = ptp_clock; + ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG); ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW); ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH); @@ -2205,7 +2201,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - INIT_LIST_HEAD(&ocelot_port->skbs); + skb_queue_head_init(&ocelot_port->tx_skbs); /* Basic L2 initialization */ @@ -2490,26 +2486,19 @@ EXPORT_SYMBOL(ocelot_init); void ocelot_deinit(struct ocelot *ocelot) { - struct list_head *pos, *tmp; struct ocelot_port *port; - struct ocelot_skb *entry; int i; cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); mutex_destroy(&ocelot->stats_lock); ocelot_ace_deinit(); + if (ocelot->ptp_clock) + ptp_clock_unregister(ocelot->ptp_clock); for (i = 0; i < ocelot->num_phys_ports; i++) { port = ocelot->ports[i]; - - list_for_each_safe(pos, tmp, &port->skbs) { - entry = list_entry(pos, struct ocelot_skb, head); - - list_del(pos); - dev_kfree_skb_any(entry->skb); - kfree(entry); - } + skb_queue_purge(&port->tx_skbs); } } EXPORT_SYMBOL(ocelot_deinit); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index ebb81d6d4ca1..656169214cdb 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -817,8 +817,6 @@ static int lpc_mii_init(struct netdata_local *pldat) pldat->mii_bus->priv = pldat; pldat->mii_bus->parent = &pldat->pdev->dev; - platform_set_drvdata(pldat->pdev, pldat->mii_bus); - node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio"); err = of_mdiobus_register(pldat->mii_bus, node); of_node_put(node); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index dbdb7c5ae8f1..39317cdfa6cf 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -596,8 +596,8 @@ enum ionic_txq_desc_opcode { * the @encap is set, the device will * offload the outer header checksums using * LCO (local checksum offload) (see - * Documentation/networking/checksum- - * offloads.txt for more info). + * Documentation/networking/checksum-offloads.rst + * for more info). 
* * IONIC_TXQ_DESC_OPCODE_CSUM_HW: * diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 60fd14df49d7..ef8258713369 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1381,12 +1381,9 @@ int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, static int ionic_lif_rss_init(struct ionic_lif *lif) { - u8 rss_key[IONIC_RSS_HASH_KEY_SIZE]; unsigned int tbl_sz; unsigned int i; - netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE); - lif->rss_types = IONIC_RSS_TYPE_IPV4 | IONIC_RSS_TYPE_IPV4_TCP | IONIC_RSS_TYPE_IPV4_UDP | @@ -1399,12 +1396,18 @@ static int ionic_lif_rss_init(struct ionic_lif *lif) for (i = 0; i < tbl_sz; i++) lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); - return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL); + return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); } -static int ionic_lif_rss_deinit(struct ionic_lif *lif) +static void ionic_lif_rss_deinit(struct ionic_lif *lif) { - return ionic_lif_rss_config(lif, 0x0, NULL, NULL); + int tbl_sz; + + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + memset(lif->rss_ind_tbl, 0, tbl_sz); + memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); + + ionic_lif_rss_config(lif, 0x0, NULL, NULL); } static void ionic_txrx_disable(struct ionic_lif *lif) @@ -1729,6 +1732,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); goto err_out_free_qcqs; } + netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); list_add_tail(&lif->list, &ionic->lifs); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d47a038cb8d0..67a4d5d45e3a 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1542,6 +1542,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) rtl_lock_config_regs(tp); device_set_wakeup_enable(tp_to_dev(tp), wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; } static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -3694,7 +3695,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_52: + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61: RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -3872,7 +3873,7 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: r8168dp_hw_jumbo_enable(tp); break; - case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: r8168e_hw_jumbo_enable(tp); break; default: @@ -3895,7 +3896,7 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: r8168dp_hw_jumbo_disable(tp); break; - case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: r8168e_hw_jumbo_disable(tp); break; default: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 644cb5d1fd4f..bbc65bd332a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2009,6 +2009,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) tx_q->cur_tx = 0; tx_q->mss = 0; netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); stmmac_start_tx_dma(priv, chan); priv->dev->stats.tx_errors++; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 292045f4581f..8237dbc3e991 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -489,7 +489,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, } /* Get the base address of device */ - for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) continue; ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); @@ -532,7 +532,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev) if (priv->plat->stmmac_clk) clk_unregister_fixed_rate(priv->plat->stmmac_clk); - for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) continue; pcim_iounmap_regions(pdev, BIT(i)); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c index 386bafe74c3f..fa8604d7b797 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c @@ -34,7 +34,7 @@ static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id) return ret; } - for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pcidev, i) == 0) continue; ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 9170572346b5..a46f4189fde3 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -62,7 +62,7 @@ config TI_CPSW config TI_CPSW_SWITCHDEV tristate "TI CPSW Switch Support with switchdev" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST - select NET_SWITCHDEV + depends on NET_SWITCHDEV select TI_DAVINCI_MDIO select MFD_SYSCON select REGMAP diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 929f3d3354e3..ecdbde539eb7 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -384,7 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; - int mcast_members; + int mcast_members = 0; int idx; idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
vid : 0); @@ -397,11 +397,13 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, mcast_members = cpsw_ale_get_port_mask(ale_entry, ale->port_mask_bits); mcast_members &= ~port_mask; + } + + if (mcast_members) cpsw_ale_set_port_mask(ale_entry, mcast_members, ale->port_mask_bits); - } else { + else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); - } cpsw_ale_write(ale, idx, ale_entry); return 0; @@ -478,6 +480,10 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry, members = cpsw_ale_get_vlan_member_list(ale_entry, ale->vlan_field_bits); members &= ~port_mask; + if (!members) { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + return; + } untag = cpsw_ale_get_vlan_untag_force(ale_entry, ale->vlan_field_bits); diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index b833cc1d188c..707d5eb480ce 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -100,8 +100,8 @@ irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) { struct cpsw_common *cpsw = dev_id; - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); writel(0, &cpsw->wr_regs->rx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); if (cpsw->quirk_irq) { disable_irq_nosync(cpsw->irqs_table[0]); |
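The hns3 hunk above folds the queue-stop handling into hns3_nic_maybe_stop_tx(): stop the subqueue, issue a full memory barrier, then re-check ring space, because the TX completion path may have freed descriptors after the first check but before it could observe the stopped state. Below is a minimal standalone sketch of that stop/re-check/wake pattern; struct demo_tx_ring and demo_ring_space() are hypothetical stand-ins for the driver's own ring and helpers, not code from the patch.

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Hypothetical minimal TX ring: only the fields the pattern needs. */
struct demo_tx_ring {
	unsigned int next_to_use;
	unsigned int next_to_clean;
	unsigned int desc_num;
	u16 queue_index;
};

/* Simplified free-descriptor count, always leaving one slot unused. */
static unsigned int demo_ring_space(const struct demo_tx_ring *ring)
{
	int space = (int)ring->next_to_clean - (int)ring->next_to_use - 1;

	return space >= 0 ? space : space + (int)ring->desc_num;
}

/*
 * Stop the subqueue, then re-check under a barrier: the completion path may
 * free descriptors between the first check and netif_stop_subqueue(), and it
 * would then skip its wake-up because it has not yet seen the stopped state.
 */
static int demo_maybe_stop_tx(struct net_device *netdev,
			      struct demo_tx_ring *ring, unsigned int bd_num)
{
	if (likely(demo_ring_space(ring) >= bd_num))
		return bd_num;

	netif_stop_subqueue(netdev, ring->queue_index);

	smp_mb();	/* order the stop against the re-read of ring space */

	if (demo_ring_space(ring) >= bd_num && netif_carrier_ok(netdev)) {
		netif_start_subqueue(netdev, ring->queue_index);
		return bd_num;
	}

	return -EBUSY;
}

The matching wake-up lives in the TX clean path, which the same hunk relaxes to restart the queue whenever enough room is available and the carrier is up.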
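The ibmvnic hunks replace bare wait_for_completion() calls with ibmvnic_wait_for_completion(), which sleeps in slices so a CRQ that goes down mid-wait is noticed, while the XPORT event handler completes fw_done/stats_done with -EIO to unblock any waiter. A rough sketch of that sliced wait follows, under the same assumptions; the upstream helper bails only once its retry counter is exhausted (the !retry-- test), and demo_device_alive() here is a hypothetical stand-in for the adapter->crq.active check.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Wait for @comp_done in slices of @timeout_ms / 5 so that a device which
 * dies mid-wait is noticed instead of sleeping for the full timeout.
 * demo_device_alive() stands in for the driver's "is the CRQ still active"
 * test.
 */
static int demo_wait_for_completion(struct completion *comp_done,
				    unsigned long timeout_ms,
				    bool (*demo_device_alive)(void *priv),
				    void *priv)
{
	unsigned long div_timeout;
	u8 retry = 5;

	div_timeout = msecs_to_jiffies(timeout_ms / retry);
	while (true) {
		if (!demo_device_alive(priv))
			return -ENODEV;		/* device went away mid-wait */
		if (!retry--)
			break;			/* all slices used up */
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;		/* completed within this slice */
	}
	return -ETIMEDOUT;
}

Callers pair this with the new fw_lock mutex and a reinit_completion() before each command, so concurrent firmware requests no longer race on the shared fw_done completion.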
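The ocelot change drops the separately allocated ocelot_skb list in favour of a plain sk_buff queue, tagging each deferred TX skb with its 2-bit timestamp ID in cb[0] and matching it up when the hardware reports the timestamp. A hedged sketch of just that queue-and-match pattern, detached from the rest of the driver:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Tag a deferred TX skb with its 2-bit timestamp ID and park it on a queue. */
static void demo_defer_txtstamp(struct sk_buff_head *tx_skbs,
				struct sk_buff *skb, u8 ts_id)
{
	skb->cb[0] = ts_id % 4;
	skb_queue_tail(tx_skbs, skb);
}

/*
 * When the hardware reports a timestamp for @id, unlink the matching skb
 * under the queue lock and hand it back to the caller, which would then fill
 * in skb_shared_hwtstamps and call skb_tstamp_tx() as the ocelot code does.
 */
static struct sk_buff *demo_match_txtstamp(struct sk_buff_head *tx_skbs, u8 id)
{
	struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tx_skbs->lock, flags);
	skb_queue_walk_safe(tx_skbs, skb, skb_tmp) {
		if (skb->cb[0] != id)
			continue;
		__skb_unlink(skb, tx_skbs);
		skb_match = skb;
		break;
	}
	spin_unlock_irqrestore(&tx_skbs->lock, flags);

	return skb_match;
}

The queue itself is set up once with skb_queue_head_init(), as ocelot_init_port() now does, and skb_queue_purge() tears it down in ocelot_deinit().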