author    Maciej Fijalkowski <maciej.fijalkowski@intel.com>  2024-08-07 12:53:24 +0200
committer Tony Nguyen <anthony.l.nguyen@intel.com>           2024-08-20 22:19:52 +0200
commit    50b2143356e888777fc5bca023c39f34f404613a (patch)
tree      bbe77ba7fdfb8876d8d9417a453a994ee00d1df3 /drivers
parent    Merge branch 'bonding-fix-xfrm-offload-bugs' (diff)
download  linux-50b2143356e888777fc5bca023c39f34f404613a.tar.xz
          linux-50b2143356e888777fc5bca023c39f34f404613a.zip
ice: fix page reuse when PAGE_SIZE is over 8k
Architectures that have PAGE_SIZE >= 8192, such as arm64, should act the same as x86 does currently, meaning reuse of a page should only take place when no one else is busy with it.

Do two things independently of the underlying PAGE_SIZE:
- store the page count under ice_rx_buf::pgcnt
- then act upon its value vs ice_rx_buf::pagecnt_bias when making the decision regarding page reuse

Fixes: 2b245cb29421 ("ice: Implement transmit and NAPI support")
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
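The heart of the change is that the "am I the only owner of this page?" check now runs on every architecture, using a page-count snapshot taken when the Rx buffer is fetched. Below is a minimal, self-contained C model of that decision, not the driver code itself: the struct and names (rx_buf_model, can_reuse_page) are hypothetical stand-ins for struct ice_rx_buf and the real helpers, and the constants in main() are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant struct ice_rx_buf fields. */
struct rx_buf_model {
	unsigned int pgcnt;        /* page_count() snapshot taken at fetch time */
	unsigned int pagecnt_bias; /* references the driver still holds itself */
};

/*
 * Model of the post-patch reuse decision: the page may be recycled only
 * if nobody besides the driver holds a reference, i.e. the snapshot of
 * the page count minus the driver's own bias is at most 1. After this
 * fix the check applies regardless of PAGE_SIZE, not only when
 * PAGE_SIZE < 8192.
 */
static bool can_reuse_page(const struct rx_buf_model *buf)
{
	return (buf->pgcnt - buf->pagecnt_bias) <= 1;
}

int main(void)
{
	struct rx_buf_model only_owner = { .pgcnt = 2, .pagecnt_bias = 1 };
	struct rx_buf_model shared     = { .pgcnt = 3, .pagecnt_bias = 1 };

	printf("only owner -> reuse? %d\n", can_reuse_page(&only_owner)); /* prints 1 */
	printf("shared     -> reuse? %d\n", can_reuse_page(&shared));     /* prints 0 */
	return 0;
}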
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 12
1 file changed, 3 insertions, 9 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8d25b6981269..50211188c1a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -837,16 +837,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
if (!dev_page_is_reusable(page))
return false;
-#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
return false;
-#else
+#if (PAGE_SIZE >= 8192)
#define ICE_LAST_OFFSET \
(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
if (rx_buf->page_offset > ICE_LAST_OFFSET)
return false;
-#endif /* PAGE_SIZE < 8192) */
+#endif /* PAGE_SIZE >= 8192) */
/* If we have drained the page fragment pool we need to update
* the pagecnt_bias and page count so that we fully restock the
@@ -949,12 +948,7 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt =
-#if (PAGE_SIZE < 8192)
- page_count(rx_buf->page);
-#else
- 0;
-#endif
+ rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)