author:    Alexei Starovoitov <ast@kernel.org>  2022-03-29 04:56:28 +0200
committer: Alexei Starovoitov <ast@kernel.org>  2022-03-29 04:56:35 +0200
commit:    9e928831113c64087fb173dd241fa97812e49490
tree:      adb33b3d631593641aadbd245727aa44c1719741
parent:    Merge branch 'kprobes: rethook: x86: Replace kretprobe trampoline with rethook'
parent:    ice: xsk: Fix indexing in ice_tx_xsk_pool()
Merge branch 'xsk: another round of fixes'
Maciej Fijalkowski says:
====================
Hello,
yet another round of fixes for XSK from Magnus and me.
Magnus addresses the fact that xp_alloc() can return NULL; this has to
be handled so that an allocation failure does not clear entries in the
SW ring on the driver side.
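A minimal sketch of the pattern (hypothetical helper name; the actual
change is in xp_alloc_batch() in the diff below): write the
out-parameter only on success, so a NULL return can never clobber a
live SW ring slot.

        /* Sketch: guard the out-parameter write so an allocation
         * failure does not overwrite an entry the caller still owns.
         */
        static u32 alloc_one(struct xsk_buff_pool *pool, struct xdp_buff **xdp)
        {
                struct xdp_buff *buff = xp_alloc(pool); /* may return NULL */

                if (buff)
                        *xdp = buff;    /* write only on success */
                return !!buff;          /* buffers allocated: 0 or 1 */
        }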
He then fixes an off-by-one problem in the Tx descriptor cleaning
routine of the ice zero-copy (ZC) driver.
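The off-by-one comes from the loop's post-decrement test:
do { ... } while (budget--) evaluates the old value of budget, so the
body runs one extra time compared to the pre-decrement form. A small
standalone demonstration (not driver code):

        #include <stdio.h>

        int main(void)
        {
                int runs, budget;

                runs = 0; budget = 4;
                do { runs++; } while (budget--);
                printf("while (budget--): %d runs\n", runs); /* prints 5 */

                runs = 0; budget = 4;
                do { runs++; } while (--budget);
                printf("while (--budget): %d runs\n", runs); /* prints 4 */
                return 0;
        }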
From my side, I am adding protection to the ZC Rx processing loop so
that descriptor cleaning does not run past entries that were already
processed.
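A reduced model of that guard (hypothetical types and names; the real
three-line change is in ice_clean_rx_irq_zc() in the diff below):
stop cleaning once the consumer index catches the producer index.

        struct rx_ring {
                unsigned int next_to_clean; /* ntc: next entry to process */
                unsigned int next_to_use;   /* ntu: next entry to be posted */
                unsigned int count;         /* ring size */
        };

        static unsigned int clean_ring(struct rx_ring *ring, unsigned int budget)
        {
                unsigned int cleaned = 0;

                while (cleaned < budget) {
                        /* Bail out when ntc catches ntu so the loop
                         * never walks into entries that were already
                         * processed or not yet posted.
                         */
                        if (ring->next_to_clean == ring->next_to_use)
                                break;

                        /* ... handle descriptor at next_to_clean ... */

                        if (++ring->next_to_clean == ring->count)
                                ring->next_to_clean = 0;
                        cleaned++;
                }
                return cleaned;
        }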
I also fix an issue with how the XSK pool is assigned to Tx queues.
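The underlying layout (reflected in the one-line ice.h fix below):
regular Tx rings occupy q_index 0 .. alloc_txq - 1 and the XDP Tx
rings are stacked right after them, so an XDP ring's zero-based XSK
queue id is its q_index minus alloc_txq; subtracting num_xdp_txq is
only correct when the two counts happen to match. A hypothetical
standalone helper for illustration:

        /* Map an XDP Tx ring's global queue index to its XSK qid. */
        static inline unsigned int xdp_q_index_to_xsk_qid(unsigned int q_index,
                                                          unsigned int alloc_txq)
        {
                return q_index - alloc_txq; /* not q_index - num_xdp_txq */
        }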
This is directed to the bpf tree.
Thanks!
Maciej Fijalkowski (2):
ice: xsk: stop Rx processing when ntc catches ntu
ice: xsk: fix indexing in ice_tx_xsk_pool()
====================
Acked-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
 drivers/net/ethernet/intel/ice/ice.h     | 2 +-
 drivers/net/ethernet/intel/ice/ice_xsk.c | 5 ++++-
 net/xdp/xsk_buff_pool.c                  | 8 ++++++--
 3 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b0b27bfcd7a2..d4f1874df7d0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -710,7 +710,7 @@ static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
 	struct ice_vsi *vsi = ring->vsi;
 	u16 qid;
 
-	qid = ring->q_index - vsi->num_xdp_txq;
+	qid = ring->q_index - vsi->alloc_txq;
 
 	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
 		return NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 88853a6ed931..dfbcaf08520e 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -608,6 +608,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 		 */
 		dma_rmb();
 
+		if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
+			break;
+
 		xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
 
 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
@@ -754,7 +757,7 @@ skip:
 		next_dd = next_dd + tx_thresh;
 		if (next_dd >= desc_cnt)
 			next_dd = tx_thresh - 1;
-	} while (budget--);
+	} while (--budget);
 
 	xdp_ring->next_dd = next_dd;
 
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index b34fca6ada86..af040ffa14ff 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -591,9 +591,13 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
 	u32 nb_entries1 = 0, nb_entries2;
 
 	if (unlikely(pool->dma_need_sync)) {
+		struct xdp_buff *buff;
+
 		/* Slow path */
-		*xdp = xp_alloc(pool);
-		return !!*xdp;
+		buff = xp_alloc(pool);
+		if (buff)
+			*xdp = buff;
+		return !!buff;
 	}
 
 	if (unlikely(pool->free_list_cnt)) {