-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c      |   8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         |  19
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c         |  21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h  |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  | 425
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c          | 151
-rw-r--r--  include/net/xdp_sock.h                              |  69
-rw-r--r--  net/xdp/xdp_umem.c                                  |   2
-rw-r--r--  net/xdp/xsk_queue.c                                 |  55
-rw-r--r--  net/xdp/xsk_queue.h                                 |   3
10 files changed, 541 insertions(+), 216 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 87fe2e60602f..9f8464f80783 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5,6 +5,7 @@
 
 #include "i40e.h"
 #include "i40e_diag.h"
+#include "i40e_txrx_common.h"
 
 /* ethtool statistics helpers */
 
@@ -1710,6 +1711,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	    (new_rx_count == vsi->rx_rings[0]->count))
 		return 0;
 
+	/* If there is an AF_XDP UMEM attached to any of the Rx rings,
+	 * disallow changing the number of descriptors -- regardless of
+	 * whether the netdev is running or not.
+	 */
+	if (i40e_xsk_any_rx_ring_enabled(vsi))
+		return -EBUSY;
+
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
 		timeout--;
 		if (!timeout)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c7d2c9010fdf..330bafe6a689 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1532,8 +1532,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 		return 0;
 	}
 
-	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
+	if (test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		return -EADDRNOTAVAIL;
 
 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1557,8 +1557,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 
-		ret = i40e_aq_mac_address_write(&vsi->back->hw,
-						I40E_AQC_WRITE_TYPE_LAA_WOL,
+		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
 						addr->sa_data, NULL);
 		if (ret)
 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
@@ -1569,7 +1568,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	/* schedule our worker thread which will take care of
 	 * applying the new filter changes
 	 */
-	i40e_service_event_schedule(vsi->back);
+	i40e_service_event_schedule(pf);
 	return 0;
 }
 
@@ -6432,7 +6431,10 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 	char *req_fec = "";
 	char *an = "";
 
-	new_speed = pf->hw.phy.link_info.link_speed;
+	if (isup)
+		new_speed = pf->hw.phy.link_info.link_speed;
+	else
+		new_speed = I40E_LINK_SPEED_UNKNOWN;
 
 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
 		return;
@@ -8509,14 +8511,9 @@ static void i40e_link_event(struct i40e_pf *pf)
 	i40e_status status;
 	bool new_link, old_link;
 
-	/* save off old link status information */
-	pf->hw.phy.link_info_old = pf->hw.phy.link_info;
-
 	/* set this to force the get_link_status call to refresh state */
 	pf->hw.phy.get_link_info = true;
-
-	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
-
 	status = i40e_get_link_status(&pf->hw, &new_link);
 
 	/* On success, disable temp link polling */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 37bd4e50ccde..740ea58ba938 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,13 +636,18 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 	unsigned long bi_size;
 	u16 i;
 
-	/* ring already cleared, nothing to do */
-	if (!tx_ring->tx_bi)
-		return;
+	if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+		i40e_xsk_clean_tx_ring(tx_ring);
+	} else {
+		/* ring already cleared, nothing to do */
+		if (!tx_ring->tx_bi)
+			return;
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++)
-		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+		/* Free all the Tx ring sk_buffs */
+		for (i = 0; i < tx_ring->count; i++)
+			i40e_unmap_and_free_tx_resource(tx_ring,
+							&tx_ring->tx_bi[i]);
+	}
 
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_bi, 0, bi_size);
@@ -1350,8 +1355,10 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 		rx_ring->skb = NULL;
 	}
 
-	if (rx_ring->xsk_umem)
+	if (rx_ring->xsk_umem) {
+		i40e_xsk_clean_rx_ring(rx_ring);
 		goto skip_free;
+	}
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index b5afd479a9c5..09809dffe399 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -87,4 +87,8 @@ static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
 	}
 }
 
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
+
 #endif /* I40E_TXRX_COMMON_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3e707c7e6782..f4bb2779f03a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1084,6 +1084,136 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 	return -EIO;
 }
 
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF vsis and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+						   u16 vsi_id,
+						   bool allmulti,
+						   bool alluni)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_mac_filter *f;
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+	int bkt;
+
+	vsi = i40e_find_vsi_from_id(pf, vsi_id);
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+		return I40E_ERR_PARAM;
+
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
+			vf->vf_id);
+		/* Lie to the VF on purpose. */
+		return 0;
+	}
+
+	if (vf->port_vlan_id) {
+		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+							    allmulti,
+							    vf->port_vlan_id,
+							    NULL);
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+			return aq_ret;
+		}
+
+		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+							    alluni,
+							    vf->port_vlan_id,
+							    NULL);
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+		}
+		return aq_ret;
+	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+				continue;
+			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+								    vsi->seid,
+								    allmulti,
+								    f->vlan,
+								    NULL);
+			if (aq_ret) {
+				int aq_err = pf->hw.aq.asq_last_status;
+
+				dev_err(&pf->pdev->dev,
+					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+					f->vlan,
+					i40e_stat_str(&pf->hw, aq_ret),
+					i40e_aq_str(&pf->hw, aq_err));
+			}
+
+			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+								    vsi->seid,
+								    alluni,
+								    f->vlan,
+								    NULL);
+			if (aq_ret) {
+				int aq_err = pf->hw.aq.asq_last_status;
+
+				dev_err(&pf->pdev->dev,
+					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+					f->vlan,
+					i40e_stat_str(&pf->hw, aq_ret),
+					i40e_aq_str(&pf->hw, aq_err));
+			}
+		}
+		return aq_ret;
+	}
+
+	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
+						       NULL);
+	if (aq_ret) {
+		int aq_err = pf->hw.aq.asq_last_status;
+
+		dev_err(&pf->pdev->dev,
+			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+			vf->vf_id,
+			i40e_stat_str(&pf->hw, aq_ret),
+			i40e_aq_str(&pf->hw, aq_err));
+		return aq_ret;
+	}
+
+	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
+						     NULL, true);
+	if (aq_ret) {
+		int aq_err = pf->hw.aq.asq_last_status;
+
+		dev_err(&pf->pdev->dev,
+			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+			vf->vf_id,
+			i40e_stat_str(&pf->hw, aq_ret),
+			i40e_aq_str(&pf->hw, aq_err));
+	}
+
+	return aq_ret;
+}
+
 /**
  * i40e_trigger_vf_reset
  * @vf: pointer to the VF structure
@@ -1145,6 +1275,9 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
 	struct i40e_hw *hw = &pf->hw;
 	u32 reg;
 
+	/* disable promisc modes in case they were enabled */
+	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
+
 	/* free VF resources to begin resetting the VSI state */
 	i40e_free_vf_res(vf);
 
@@ -1840,143 +1973,55 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
-static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
-					       u8 *msg, u16 msglen)
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_promisc_info *info =
 	    (struct virtchnl_promisc_info *)msg;
 	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_mac_filter *f;
 	i40e_status aq_ret = 0;
 	bool allmulti = false;
-	struct i40e_vsi *vsi;
 	bool alluni = false;
-	int aq_err = 0;
-	int bkt;
 
-	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-	    !vsi) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-		dev_err(&pf->pdev->dev,
-			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
-			vf->vf_id);
-		/* Lie to the VF on purpose. */
-		aq_ret = 0;
-		goto error_param;
-	}
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+		return I40E_ERR_PARAM;
 
+	/* Multicast promiscuous handling */
 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
 		allmulti = true;
 
-	if (vf->port_vlan_id) {
-		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
-							    allmulti,
-							    vf->port_vlan_id,
-							    NULL);
-	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
-		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
-				continue;
-			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
-								    vsi->seid,
-								    allmulti,
-								    f->vlan,
-								    NULL);
-			aq_err = pf->hw.aq.asq_last_status;
-			if (aq_ret) {
-				dev_err(&pf->pdev->dev,
-					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
-					f->vlan,
-					i40e_stat_str(&pf->hw, aq_ret),
-					i40e_aq_str(&pf->hw, aq_err));
-				break;
-			}
-		}
-	} else {
-		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-							       allmulti, NULL);
-		aq_err = pf->hw.aq.asq_last_status;
-		if (aq_ret) {
-			dev_err(&pf->pdev->dev,
-				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
-				vf->vf_id,
-				i40e_stat_str(&pf->hw, aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
-			goto error_param;
-		}
-	}
-
+	if (info->flags & FLAG_VF_UNICAST_PROMISC)
+		alluni = true;
+	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
+						 alluni);
 	if (!aq_ret) {
-		dev_info(&pf->pdev->dev,
-			 "VF %d successfully set multicast promiscuous mode\n",
-			 vf->vf_id);
-		if (allmulti)
+		if (allmulti) {
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully set multicast promiscuous mode\n",
+				 vf->vf_id);
 			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
-		else
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully unset multicast promiscuous mode\n",
+				 vf->vf_id);
 			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
-	}
-
-	if (info->flags & FLAG_VF_UNICAST_PROMISC)
-		alluni = true;
-	if (vf->port_vlan_id) {
-		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
-							    alluni,
-							    vf->port_vlan_id,
-							    NULL);
-	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
-		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
-				continue;
-			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
-								    vsi->seid,
-								    alluni,
-								    f->vlan,
-								    NULL);
-			aq_err = pf->hw.aq.asq_last_status;
-			if (aq_ret)
-				dev_err(&pf->pdev->dev,
-					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
-					f->vlan,
-					i40e_stat_str(&pf->hw, aq_ret),
-					i40e_aq_str(&pf->hw, aq_err));
-		}
-	} else {
-		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
-							     alluni, NULL,
-							     true);
-		aq_err = pf->hw.aq.asq_last_status;
-		if (aq_ret) {
-			dev_err(&pf->pdev->dev,
-				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
-				vf->vf_id, info->flags,
-				i40e_stat_str(&pf->hw, aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
-			goto error_param;
 		}
-	}
-
-	if (!aq_ret) {
-		dev_info(&pf->pdev->dev,
-			 "VF %d successfully set unicast promiscuous mode\n",
-			 vf->vf_id);
-		if (alluni)
+		if (alluni) {
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully set unicast promiscuous mode\n",
+				 vf->vf_id);
 			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
-		else
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully unset unicast promiscuous mode\n",
+				 vf->vf_id);
 			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
+		}
 	}
 
-error_param:
 	/* send the response to the VF */
 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -1987,12 +2032,11 @@ error_param:
  * i40e_vc_config_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
-static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vsi_queue_config_info *qci =
 	    (struct virtchnl_vsi_queue_config_info *)msg;
@@ -2105,12 +2149,11 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
  * i40e_vc_config_irq_map_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
-static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_irq_map_info *irqmap_info =
 	    (struct virtchnl_irq_map_info *)msg;
@@ -2202,11 +2245,10 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
  * i40e_vc_enable_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
-static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_queue_select *vqs =
 	    (struct virtchnl_queue_select *)msg;
@@ -2261,12 +2303,11 @@ error_param:
  * i40e_vc_disable_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
-static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_queue_select *vqs =
 	    (struct virtchnl_queue_select *)msg;
@@ -2309,14 +2350,13 @@ error_param:
  * i40e_vc_request_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * VFs get a default number of queues but can use this message to request a
 * different number.  If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/
-static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vf_res_request *vfres =
 	    (struct virtchnl_vf_res_request *)msg;
@@ -2360,11 +2400,10 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
  * i40e_vc_get_stats_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/
-static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_queue_select *vqs =
 	    (struct virtchnl_queue_select *)msg;
@@ -2470,11 +2509,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
  * i40e_vc_add_mac_addr_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * add guest mac address filter
 **/
-static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_ether_addr_list *al =
 	    (struct virtchnl_ether_addr_list *)msg;
@@ -2541,11 +2579,10 @@ error_param:
  * i40e_vc_del_mac_addr_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * remove guest mac address filter
 **/
-static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_ether_addr_list *al =
 	    (struct virtchnl_ether_addr_list *)msg;
@@ -2611,11 +2648,10 @@ error_param:
  * i40e_vc_add_vlan_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * program guest vlan id
 **/
-static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vlan_filter_list *vfl =
 	    (struct virtchnl_vlan_filter_list *)msg;
@@ -2684,11 +2720,10 @@ error_param:
  * i40e_vc_remove_vlan_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
-static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vlan_filter_list *vfl =
 	    (struct virtchnl_vlan_filter_list *)msg;
@@ -2771,13 +2806,11 @@ error_param:
  * i40e_vc_iwarp_qvmap_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp msgs
 **/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
-				   bool config)
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
 {
 	struct virtchnl_iwarp_qvlist_info *qvlist_info =
 	    (struct virtchnl_iwarp_qvlist_info *)msg;
@@ -2808,11 +2841,10 @@ error_param:
  * i40e_vc_config_rss_key
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * Configure the VF's RSS key
 **/
-static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_key *vrk =
 	    (struct virtchnl_rss_key *)msg;
@@ -2840,11 +2872,10 @@ err:
  * i40e_vc_config_rss_lut
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * Configure the VF's RSS LUT
 **/
-static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_lut *vrl =
 	    (struct virtchnl_rss_lut *)msg;
@@ -2872,11 +2903,10 @@ err:
  * i40e_vc_get_rss_hena
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_hena *vrh = NULL;
 	struct i40e_pf *pf = vf->pf;
@@ -2908,11 +2938,10 @@ err:
  * i40e_vc_set_rss_hena
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * Set the RSS HENA bits for the VF
 **/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_hena *vrh =
 	    (struct virtchnl_rss_hena *)msg;
@@ -2937,12 +2966,10 @@ err:
  * i40e_vc_enable_vlan_stripping
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * Enable vlan header stripping for the VF
 **/
-static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
-					 u16 msglen)
+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 	i40e_status aq_ret = 0;
@@ -2964,12 +2991,10 @@ err:
  * i40e_vc_disable_vlan_stripping
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
 *
 * Disable vlan header stripping for the VF
 **/
-static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
-					  u16 msglen)
+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 	i40e_status aq_ret = 0;
@@ -3669,65 +3694,65 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 		ret = 0;
 		break;
 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
-		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
-		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_config_queues_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
-		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+		ret = i40e_vc_config_irq_map_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES:
-		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_enable_queues_msg(vf, msg);
 		i40e_vc_notify_vf_link_state(vf);
 		break;
 	case VIRTCHNL_OP_DISABLE_QUEUES:
-		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_disable_queues_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ADD_ETH_ADDR:
-		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+		ret = i40e_vc_add_mac_addr_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_DEL_ETH_ADDR:
-		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+		ret = i40e_vc_del_mac_addr_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ADD_VLAN:
-		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+		ret = i40e_vc_add_vlan_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_DEL_VLAN:
-		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+		ret = i40e_vc_remove_vlan_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_GET_STATS:
-		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+		ret = i40e_vc_get_stats_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_IWARP:
 		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
 		break;
 	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
-		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
+		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
 		break;
 	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
-		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
+		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
 		break;
 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
-		ret = i40e_vc_config_rss_key(vf, msg, msglen);
+		ret = i40e_vc_config_rss_key(vf, msg);
 		break;
 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
-		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+		ret = i40e_vc_config_rss_lut(vf, msg);
 		break;
 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
-		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+		ret = i40e_vc_get_rss_hena(vf, msg);
 		break;
 	case VIRTCHNL_OP_SET_RSS_HENA:
-		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+		ret = i40e_vc_set_rss_hena(vf, msg);
 		break;
 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
-		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
+		ret = i40e_vc_enable_vlan_stripping(vf, msg);
 		break;
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
-		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
+		ret = i40e_vc_disable_vlan_stripping(vf, msg);
 		break;
 	case VIRTCHNL_OP_REQUEST_QUEUES:
-		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_request_queues_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ENABLE_CHANNELS:
 		ret = i40e_vc_add_qch_msg(vf, msg);
@@ -3796,6 +3821,35 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 }
 
 /**
+ * i40e_validate_vf
+ * @pf: the physical function
+ * @vf_id: VF identifier
+ *
+ * Check that the VF is enabled and the VSI exists.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
+{
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev,
+			"Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto err_out;
+	}
+	vf = &pf->vf[vf_id];
+	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
+	if (!vsi)
+		ret = -EINVAL;
+err_out:
+	return ret;
+}
+
+/**
  * i40e_ndo_set_vf_mac
  * @netdev: network interface device structure
 * @vf_id: VF identifier
@@ -3816,14 +3870,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	u8 i;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev,
-			"Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error_param;
-	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
 
 	/* When the VF is resetting wait until it is done.
@@ -3942,11 +3993,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 	int ret = 0;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error_pvid;
-	}
 
 	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
 		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
@@ -3960,7 +4009,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 		goto error_pvid;
 	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4080,11 +4129,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 	int ret = 0;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error;
-	}
 
 	if (min_tx_rate) {
 		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
@@ -4092,7 +4139,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 		return -EINVAL;
 	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4128,13 +4175,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 	int ret = 0;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error_param;
-	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	/* first vsi is always the LAN vsi */
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
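
The i40e_xsk.c hunks below are the first consumer of the fill-queue reuse
mechanism added on the net/xdp side of this change. As a reading aid, here
is a minimal sketch of the setup pattern a driver follows when a UMEM is
enabled. Only the xsk_reuseq_*() calls are from this patch; the function
name and "ring_size" parameter are placeholders, and error unwinding is
trimmed:

#include <net/xdp_sock.h>

/* Hypothetical driver helper; mirrors the i40e_xsk_umem_enable() hunk
 * below, where "ring_size" stands in for vsi->rx_rings[0]->count.
 */
static int drv_install_reuseq(struct xdp_umem *umem, u32 ring_size)
{
	struct xdp_umem_fq_reuse *reuseq;

	/* Size the reuse queue so every Rx ring entry can be parked
	 * in it during a teardown.
	 */
	reuseq = xsk_reuseq_prepare(ring_size);
	if (!reuseq)
		return -ENOMEM;

	/* Swap the new queue into the UMEM and free whichever queue
	 * comes back: the previous one, or the new one if it was too
	 * small to take over the old queue's entries. kvfree(NULL) is
	 * a no-op, so the combined call is safe on first use too.
	 */
	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	return 0;
}
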
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 2ebfc78bbd09..add1e457886d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -140,6 +140,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
 static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 				u16 qid)
 {
+	struct xdp_umem_fq_reuse *reuseq;
 	bool if_running;
 	int err;
 
@@ -156,6 +157,12 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 		return -EBUSY;
 	}
 
+	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+	if (!reuseq)
+		return -ENOMEM;
+
+	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
 	err = i40e_xsk_umem_dma_map(vsi, umem);
 	if (err)
 		return err;
@@ -353,16 +360,46 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
 }
 
 /**
- * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
+ * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
  * @rx_ring: Rx ring
- * @count: The number of buffers to allocate
+ * @bi: Rx buffer to populate
 *
- * This function allocates a number of Rx buffers and places them on
- * the Rx ring.
+ * This function allocates an Rx buffer. The buffer can come from the
+ * fill queue, or via the reuse queue.
 *
 * Returns true for a successful allocation, false otherwise
 **/
-bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
+static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
+				      struct i40e_rx_buffer *bi)
+{
+	struct xdp_umem *umem = rx_ring->xsk_umem;
+	u64 handle, hr;
+
+	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	handle &= rx_ring->xsk_umem->chunk_mask;
+
+	hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+	bi->dma = xdp_umem_get_dma(umem, handle);
+	bi->dma += hr;
+
+	bi->addr = xdp_umem_get_data(umem, handle);
+	bi->addr += hr;
+
+	bi->handle = handle + umem->headroom;
+
+	xsk_umem_discard_addr_rq(umem);
+	return true;
+}
+
+static __always_inline bool
+__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
+			   bool alloc(struct i40e_ring *rx_ring,
+				      struct i40e_rx_buffer *bi))
 {
 	u16 ntu = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
@@ -372,7 +409,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
 	bi = &rx_ring->rx_bi[ntu];
 	do {
-		if (!i40e_alloc_buffer_zc(rx_ring, bi)) {
+		if (!alloc(rx_ring, bi)) {
 			ok = false;
 			goto no_buffers;
 		}
@@ -405,6 +442,38 @@ no_buffers:
 }
 
 /**
+ * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the reuse queue
+ * or fill ring and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
+{
+	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+					  i40e_alloc_buffer_slow_zc);
+}
+
+/**
+ * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
+{
+	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+					  i40e_alloc_buffer_zc);
+}
+
+/**
  * i40e_get_rx_buffer_zc - Return the current Rx buffer
  * @rx_ring: Rx ring
  * @size: The size of the rx buffer (read from descriptor)
@@ -571,8 +640,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
 			failure = failure ||
-				  !i40e_alloc_rx_buffers_zc(rx_ring,
-							    cleaned_count);
+				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
+								 cleaned_count);
 			cleaned_count = 0;
 		}
 
@@ -830,3 +899,69 @@ int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
 
 	return 0;
 }
+
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+	u16 i;
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
+		if (!rx_bi->addr)
+			continue;
+
+		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
+		rx_bi->addr = NULL;
+	}
+}
+
+/**
+ * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
+ * @tx_ring: XDP Tx ring
+ **/
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+	struct xdp_umem *umem = tx_ring->xsk_umem;
+	struct i40e_tx_buffer *tx_bi;
+	u32 xsk_frames = 0;
+
+	while (ntc != ntu) {
+		tx_bi = &tx_ring->tx_bi[ntc];
+
+		if (tx_bi->xdpf)
+			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+		else
+			xsk_frames++;
+
+		tx_bi->xdpf = NULL;
+
+		ntc++;
+		if (ntc >= tx_ring->count)
+			ntc = 0;
+	}
+
+	if (xsk_frames)
+		xsk_umem_complete_tx(umem, xsk_frames);
+}
+
+/**
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: vsi
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ **/
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
+{
+	int i;
+
+	if (!vsi->xsk_umems)
+		return false;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		if (vsi->xsk_umems[i])
+			return true;
+	}
+
+	return false;
+}
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 932ca0dad6f3..70a115bea4f4 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -21,6 +21,12 @@ struct xdp_umem_page {
 	dma_addr_t dma;
 };
 
+struct xdp_umem_fq_reuse {
+	u32 nentries;
+	u32 length;
+	u64 handles[];
+};
+
 struct xdp_umem {
 	struct xsk_queue *fq;
 	struct xsk_queue *cq;
@@ -37,6 +43,7 @@ struct xdp_umem {
 	struct page **pgs;
 	u32 npgs;
 	struct net_device *dev;
+	struct xdp_umem_fq_reuse *fq_reuse;
 	u16 queue_id;
 	bool zc;
 	spinlock_t xsk_list_lock;
@@ -75,6 +82,10 @@ void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+					  struct xdp_umem_fq_reuse *newq);
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
 
 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
 {
@@ -85,6 +96,35 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 {
 	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
 }
+
+/* Reuse-queue aware version of FILL queue helpers */
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		return xsk_umem_peek_addr(umem, addr);
+
+	*addr = rq->handles[rq->length - 1];
+	return addr;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		xsk_umem_discard_addr(umem);
+	else
+		rq->length--;
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	rq->handles[rq->length++] = addr;
+}
 #else
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
@@ -128,6 +168,21 @@ static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 {
 }
 
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+	return NULL;
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
+	struct xdp_umem *umem,
+	struct xdp_umem_fq_reuse *newq)
+{
+	return NULL;
+}
+static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+}
+
 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
 {
 	return NULL;
@@ -137,6 +192,20 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 {
 	return 0;
 }
+
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	return NULL;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+}
+
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index b3b632c5aeae..555427b3e0fe 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -165,6 +165,8 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}
 
+	xsk_reuseq_destroy(umem);
+
 	xdp_umem_unpin_pages(umem);
 
 	task = get_pid_task(umem->pid, PIDTYPE_PID);
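
Before the xsk_queue.c implementation, a short sketch of how the three
reuse-aware helpers from xdp_sock.h above combine on a driver's Rx path.
The reuse queue acts as a LIFO stack consulted before the user-populated
FILL ring; the drv_*() wrappers here are illustrative, not part of the
patch:

#include <net/xdp_sock.h>

/* Allocation: prefer recycled handles, fall back to the FILL ring.
 * Mirrors i40e_alloc_buffer_slow_zc(); a real driver also masks the
 * handle with umem->chunk_mask before computing DMA/data addresses.
 */
static bool drv_get_umem_handle(struct xdp_umem *umem, u64 *handle)
{
	if (!xsk_umem_peek_addr_rq(umem, handle))
		return false;	/* reuse queue and FILL ring both empty */

	xsk_umem_discard_addr_rq(umem);	/* consume the peeked entry */
	return true;
}

/* Teardown: park a still-unused handle on the reuse queue instead of
 * dropping it, as i40e_xsk_clean_rx_ring() does above.
 */
static void drv_recycle_umem_handle(struct xdp_umem *umem, u64 handle)
{
	xsk_umem_fq_reuse(umem, handle);
}
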
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index 2dc1384d9f27..b66504592d9b 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -3,7 +3,9 @@
  * Copyright(c) 2018 Intel Corporation.
 */
 
+#include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/overflow.h>
 
 #include "xsk_queue.h"
 
@@ -62,3 +64,56 @@ void xskq_destroy(struct xsk_queue *q)
 	page_frag_free(q->ring);
 	kfree(q);
 }
+
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+	struct xdp_umem_fq_reuse *newq;
+
+	/* Check for overflow */
+	if (nentries > (u32)roundup_pow_of_two(nentries))
+		return NULL;
+	nentries = roundup_pow_of_two(nentries);
+
+	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
+	if (!newq)
+		return NULL;
+	memset(newq, 0, offsetof(typeof(*newq), handles));
+
+	newq->nentries = nentries;
+	return newq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
+
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+					  struct xdp_umem_fq_reuse *newq)
+{
+	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
+
+	if (!oldq) {
+		umem->fq_reuse = newq;
+		return NULL;
+	}
+
+	if (newq->nentries < oldq->length)
+		return newq;
+
+	memcpy(newq->handles, oldq->handles,
+	       array_size(oldq->length, sizeof(u64)));
+	newq->length = oldq->length;
+
+	umem->fq_reuse = newq;
+	return oldq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
+
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+	kvfree(rq);
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_free);
+
+void xsk_reuseq_destroy(struct xdp_umem *umem)
+{
+	xsk_reuseq_free(umem->fq_reuse);
+	umem->fq_reuse = NULL;
+}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 82252cccb4e0..bcb5cbb40419 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -258,4 +258,7 @@ void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
 
+/* Executed by the core when the entire UMEM gets freed */
+void xsk_reuseq_destroy(struct xdp_umem *umem);
+
 #endif /* _LINUX_XSK_QUEUE_H */
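
A closing note on xsk_reuseq_prepare(): its overflow check relies on
roundup_pow_of_two() wrapping when truncated to u32 for requests above
2^31, so the rounded value falls below nentries and the function returns
NULL rather than allocating an undersized array. A hedged sketch of the
full lifecycle, with an assumed UMEM pointer and no real datapath:

#include <net/xdp_sock.h>

/* Illustrative only: exercises the reuse-queue lifecycle end to end. */
static int reuseq_lifecycle_demo(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq;

	rq = xsk_reuseq_prepare(3000);	/* rounded up to 4096 entries */
	if (!rq)	/* e.g. nentries > 2^31 fails the overflow check */
		return -ENOMEM;

	/* The old queue (or NULL on first use) comes back and is freed;
	 * if rq were too small to hold the old queue's entries, rq
	 * itself would be returned and freed instead.
	 */
	xsk_reuseq_free(xsk_reuseq_swap(umem, rq));

	/* ... datapath uses the *_rq helpers and xsk_umem_fq_reuse() ...
	 * On UMEM release, xdp_umem_release() calls xsk_reuseq_destroy(),
	 * which frees the queue and clears umem->fq_reuse.
	 */
	return 0;
}
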