author     Magnus Karlsson <magnus.karlsson@intel.com>    2020-08-28 10:26:15 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>         2020-08-31 21:15:03 +0200
commit     1742b3d528690ae7773cf7bf2f01a90ee1de2fe0 (patch)
tree       eeb0924a4023c43b42bbe2f552b3481c1fd19442 /net/xdp
parent     bpf: Fix build without BPF_LSM. (diff)
xsk: i40e: ice: ixgbe: mlx5: Pass buffer pool to driver instead of umem
Replace the explicit umem reference passed to the driver in AF_XDP zero-copy mode with the buffer pool instead. This is in preparation for extending the functionality of the zero-copy mode so that umems can be shared between queues on the same netdev and also between netdevs.

In this commit, only an umem reference has been added to the buffer pool struct. But later commits will add other entities to it. These are going to be entities that are different between different queue ids and netdevs even though the umem is shared between them.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-2-git-send-email-magnus.karlsson@intel.com
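For orientation only (not part of this patch): a minimal sketch of what a driver's ndo_bpf callback might look like once it is handed the buffer pool through the renamed XDP_SETUP_XSK_POOL command. The my_drv_* names are hypothetical placeholders for per-driver setup code, not helpers introduced by this series.

#include <linux/netdevice.h>
#include <net/xsk_buff_pool.h>

/* Hypothetical per-driver helpers; stand-ins for the real drivers' queue setup. */
static int my_drv_xsk_pool_enable(struct net_device *dev,
                                  struct xsk_buff_pool *pool, u16 qid);
static int my_drv_xsk_pool_disable(struct net_device *dev, u16 qid);

static int my_drv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case XDP_SETUP_XSK_POOL:        /* was XDP_SETUP_XSK_UMEM */
                /* A non-NULL pool enables zero-copy on queue_id, NULL disables it. */
                if (bpf->xsk.pool)
                        return my_drv_xsk_pool_enable(dev, bpf->xsk.pool,
                                                      bpf->xsk.queue_id);
                return my_drv_xsk_pool_disable(dev, bpf->xsk.queue_id);
        default:
                return -EINVAL;
        }
}

On the datapath side, a driver can still look up the pool bound to a given ring with xdp_get_xsk_pool_from_qid(dev, queue_id), which replaces xdp_get_umem_from_qid() below.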
Diffstat (limited to 'net/xdp')
-rw-r--r--   net/xdp/xdp_umem.c        45
-rw-r--r--   net/xdp/xsk_buff_pool.c    5
2 files changed, 26 insertions(+), 24 deletions(-)
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index e97db37354e4..0b5f3b04216f 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -51,8 +51,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
  * not know if the device has more tx queues than rx, or the opposite.
  * This might also change during run time.
  */
-static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
-                               u16 queue_id)
+static int xdp_reg_xsk_pool_at_qid(struct net_device *dev,
+                                   struct xsk_buff_pool *pool,
+                                   u16 queue_id)
 {
         if (queue_id >= max_t(unsigned int,
                               dev->real_num_rx_queues,
@@ -60,31 +61,31 @@ static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
                 return -EINVAL;

         if (queue_id < dev->real_num_rx_queues)
-                dev->_rx[queue_id].umem = umem;
+                dev->_rx[queue_id].pool = pool;
         if (queue_id < dev->real_num_tx_queues)
-                dev->_tx[queue_id].umem = umem;
+                dev->_tx[queue_id].pool = pool;

         return 0;
 }

-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
-                                       u16 queue_id)
+struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev,
+                                                u16 queue_id)
 {
         if (queue_id < dev->real_num_rx_queues)
-                return dev->_rx[queue_id].umem;
+                return dev->_rx[queue_id].pool;
         if (queue_id < dev->real_num_tx_queues)
-                return dev->_tx[queue_id].umem;
+                return dev->_tx[queue_id].pool;

         return NULL;
 }
-EXPORT_SYMBOL(xdp_get_umem_from_qid);
+EXPORT_SYMBOL(xdp_get_xsk_pool_from_qid);

-static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
+static void xdp_clear_xsk_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
         if (queue_id < dev->real_num_rx_queues)
-                dev->_rx[queue_id].umem = NULL;
+                dev->_rx[queue_id].pool = NULL;
         if (queue_id < dev->real_num_tx_queues)
-                dev->_tx[queue_id].umem = NULL;
+                dev->_tx[queue_id].pool = NULL;
 }

 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
@@ -102,10 +103,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
         if (force_zc && force_copy)
                 return -EINVAL;

-        if (xdp_get_umem_from_qid(dev, queue_id))
+        if (xdp_get_xsk_pool_from_qid(dev, queue_id))
                 return -EBUSY;

-        err = xdp_reg_umem_at_qid(dev, umem, queue_id);
+        err = xdp_reg_xsk_pool_at_qid(dev, umem->pool, queue_id);
         if (err)
                 return err;

@@ -132,8 +133,8 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                 goto err_unreg_umem;
         }

-        bpf.command = XDP_SETUP_XSK_UMEM;
-        bpf.xsk.umem = umem;
+        bpf.command = XDP_SETUP_XSK_POOL;
+        bpf.xsk.pool = umem->pool;
         bpf.xsk.queue_id = queue_id;

         err = dev->netdev_ops->ndo_bpf(dev, &bpf);
@@ -147,7 +148,7 @@ err_unreg_umem:
         if (!force_zc)
                 err = 0; /* fallback to copy mode */
         if (err)
-                xdp_clear_umem_at_qid(dev, queue_id);
+                xdp_clear_xsk_pool_at_qid(dev, queue_id);
         return err;
 }

@@ -162,8 +163,8 @@ void xdp_umem_clear_dev(struct xdp_umem *umem)
                 return;

         if (umem->zc) {
-                bpf.command = XDP_SETUP_XSK_UMEM;
-                bpf.xsk.umem = NULL;
+                bpf.command = XDP_SETUP_XSK_POOL;
+                bpf.xsk.pool = NULL;
                 bpf.xsk.queue_id = umem->queue_id;

                 err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
@@ -172,7 +173,7 @@ void xdp_umem_clear_dev(struct xdp_umem *umem)
                         WARN(1, "failed to disable umem!\n");
         }

-        xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
+        xdp_clear_xsk_pool_at_qid(umem->dev, umem->queue_id);

         dev_put(umem->dev);
         umem->dev = NULL;
@@ -373,8 +374,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
         if (err)
                 goto out_account;

-        umem->pool = xp_create(umem->pgs, umem->npgs, chunks, chunk_size,
-                               headroom, size, unaligned_chunks);
+        umem->pool = xp_create(umem, chunks, chunk_size, headroom, size,
+                               unaligned_chunks);
         if (!umem->pool) {
                 err = -ENOMEM;
                 goto out_pin;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index a2044c245215..f3df3cba3d73 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -29,7 +29,7 @@ void xp_destroy(struct xsk_buff_pool *pool)
         kvfree(pool);
 }

-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
+struct xsk_buff_pool *xp_create(struct xdp_umem *umem, u32 chunks,
                                 u32 chunk_size, u32 headroom, u64 size,
                                 bool unaligned)
 {
@@ -54,6 +54,7 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
         pool->chunk_size = chunk_size;
         pool->unaligned = unaligned;
         pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
+        pool->umem = umem;
         INIT_LIST_HEAD(&pool->free_list);

         for (i = 0; i < pool->free_heads_cnt; i++) {
@@ -63,7 +64,7 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
                 pool->free_heads[i] = xskb;
         }

-        err = xp_addr_map(pool, pages, nr_pages);
+        err = xp_addr_map(pool, umem->pgs, umem->npgs);
         if (!err)
                 return pool;
