| author | Kal Conley <kal.conley@dectris.com> | 2023-04-23 20:01:56 +0200 |
|---|---|---|
| committer | Daniel Borkmann <daniel@iogearbox.net> | 2023-04-27 22:24:51 +0200 |
| commit | 6ec7be9a2d2b09815f69fc2183ec31857eaa6e27 (patch) | |
| tree | 6bf5f263e108d9ec9e418a2d088de890834f700f /net/xdp | |
| parent | selftests/bpf: Update the aarch64 tests deny list (diff) | |
xsk: Use pool->dma_pages to check for DMA
Compare pool->dma_pages instead of pool->dma_pages_cnt to check for an
active DMA mapping. pool->dma_pages needs to be read anyway to access
the map, so this compiles to more efficient code.
Signed-off-by: Kal Conley <kal.conley@dectris.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20230423180157.93559-1-kal.conley@dectris.com
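
For illustration only, here is a minimal standalone C sketch of the pattern the patch relies on; `struct buf_pool`, `lookup_by_cnt()`, and `lookup_by_ptr()` are hypothetical stand-ins, not part of the xsk code. When the guard tests the same pointer that the fast path dereferences, the compiler can reuse the already-loaded `dma_pages` value instead of also loading a separate `dma_pages_cnt` field.

```c
#include <stddef.h>

/* Hypothetical pool layout standing in for struct xsk_buff_pool. */
struct buf_pool {
	void **dma_pages;           /* NULL while no DMA mapping is active */
	unsigned int dma_pages_cnt;
};

/* Counter-based guard: reads dma_pages_cnt and then dma_pages. */
static inline void *lookup_by_cnt(const struct buf_pool *pool, size_t idx)
{
	if (pool->dma_pages_cnt == 0)
		return NULL;
	return pool->dma_pages[idx];
}

/* Pointer-based guard: the value tested is the value dereferenced. */
static inline void *lookup_by_ptr(const struct buf_pool *pool, size_t idx)
{
	if (!pool->dma_pages)
		return NULL;
	return pool->dma_pages[idx];
}
```

Note that the pointer-based guard only stays correct because the patch also sets pool->dma_pages to NULL in xp_dma_unmap() after the kvfree(), so a torn-down mapping cannot pass the check.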
Diffstat (limited to 'net/xdp')
-rw-r--r-- | net/xdp/xsk_buff_pool.c | 7 |
1 file changed, 4 insertions, 3 deletions
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index b2df1e0f8153..26f6d304451e 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -350,7 +350,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
 {
 	struct xsk_dma_map *dma_map;
 
-	if (pool->dma_pages_cnt == 0)
+	if (!pool->dma_pages)
 		return;
 
 	dma_map = xp_find_dma_map(pool);
@@ -364,6 +364,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
 	__xp_dma_unmap(dma_map, attrs);
 
 	kvfree(pool->dma_pages);
+	pool->dma_pages = NULL;
 	pool->dma_pages_cnt = 0;
 	pool->dev = NULL;
 }
@@ -503,7 +504,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 	if (pool->unaligned) {
 		xskb = pool->free_heads[--pool->free_heads_cnt];
 		xp_init_xskb_addr(xskb, pool, addr);
-		if (pool->dma_pages_cnt)
+		if (pool->dma_pages)
 			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
 	} else {
 		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
@@ -569,7 +570,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
 	if (pool->unaligned) {
 		xskb = pool->free_heads[--pool->free_heads_cnt];
 		xp_init_xskb_addr(xskb, pool, addr);
-		if (pool->dma_pages_cnt)
+		if (pool->dma_pages)
 			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
 	} else {
 		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];