author		Ciara Loftus <ciara.loftus@intel.com>	2021-03-31 08:12:18 +0200
committer	Alexei Starovoitov <ast@kernel.org>	2021-04-01 23:45:43 +0200
commit		ca7a83e2487ad0bc9a3e0e7a8645354aa1782f13 (patch)
tree		818d93eb379796febf71ff998a0c7edd0490e4f0
parent		libbpf: Restore umem state after socket create failure (diff)
libbpf: Only create rx and tx XDP rings when necessary
Prior to this commit, xsk_socket__create(_shared) always attempted to create
the rx and tx rings for the socket. However, this causes an issue when the
socket being set up is the one that shares its fd with the UMEM. If a previous
call to this function failed for that socket after the rings were set up, a
subsequent call would always fail: the rings are not torn down after the first
call, and setting them up again hits an error because they already exist.

Solve this by remembering whether the rings were set up. Introduce new bools
to struct xsk_umem which represent the ring setup status and use them to
determine whether or not to set up the rings.

Fixes: 1cad07884239 ("libbpf: add support for using AF_XDP sockets")
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210331061218.1647-4-ciara.loftus@intel.com
-rw-r--r--	tools/lib/bpf/xsk.c	13
1 file changed, 11 insertions(+), 2 deletions(-)
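The failure mode is easiest to see from the application side. Below is a
minimal usage sketch (not part of this patch; the interface name "eth0",
queue 0, frame counts and default configs are placeholders) showing a retried
socket creation on the socket that shares the UMEM's fd, which is the case
this commit fixes:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <bpf/xsk.h>	/* libbpf AF_XDP helpers (pre-libxdp) */

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_umem *umem = NULL;
	struct xsk_socket *xsk = NULL;
	size_t size = (size_t)NUM_FRAMES * FRAME_SIZE;
	void *area;
	int ret, attempt;

	/* Page-aligned buffer backing the UMEM. */
	area = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return 1;

	/* Default UMEM config; the UMEM owns its own fd. */
	ret = xsk_umem__create(&umem, area, size, &fill, &comp, NULL);
	if (ret) {
		fprintf(stderr, "umem create: %s\n", strerror(-ret));
		return 1;
	}

	/*
	 * The first socket created on a UMEM shares the UMEM's fd. If the
	 * first attempt fails after the rx/tx rings were created (e.g. at
	 * bind time), a retry used to fail unconditionally because the
	 * rings still existed on that fd. With this commit, the retry
	 * skips ring creation and can succeed.
	 */
	for (attempt = 0; attempt < 2; attempt++) {
		ret = xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx,
					 NULL);
		if (!ret)
			break;
		fprintf(stderr, "attempt %d failed: %s\n", attempt,
			strerror(-ret));
	}

	if (!ret)
		xsk_socket__delete(xsk);
	xsk_umem__delete(umem);
	munmap(area, size);
	return ret ? 1 : 0;
}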
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 5098d9e3b55a..d24b5cc720ec 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -59,6 +59,8 @@ struct xsk_umem {
int fd;
int refcount;
struct list_head ctx_list;
+ bool rx_ring_setup_done;
+ bool tx_ring_setup_done;
};
struct xsk_ctx {
@@ -857,6 +859,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
struct xsk_ctx *ctx;
int err, ifindex;
bool unmap = umem->fill_save != fill;
+ bool rx_setup_done = false, tx_setup_done = false;
if (!umem || !xsk_ptr || !(rx || tx))
return -EFAULT;
@@ -884,6 +887,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
} else {
xsk->fd = umem->fd;
+ rx_setup_done = umem->rx_ring_setup_done;
+ tx_setup_done = umem->tx_ring_setup_done;
}
ctx = xsk_get_ctx(umem, ifindex, queue_id);
@@ -902,7 +907,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
xsk->ctx = ctx;
- if (rx) {
+ if (rx && !rx_setup_done) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
&xsk->config.rx_size,
sizeof(xsk->config.rx_size));
@@ -910,8 +915,10 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
err = -errno;
goto out_put_ctx;
}
+ if (xsk->fd == umem->fd)
+ umem->rx_ring_setup_done = true;
}
- if (tx) {
+ if (tx && !tx_setup_done) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
&xsk->config.tx_size,
sizeof(xsk->config.tx_size));
@@ -919,6 +926,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
err = -errno;
goto out_put_ctx;
}
+ if (xsk->fd == umem->fd)
+ umem->tx_ring_setup_done = true;
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
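
For context (not part of the patch): the rings guarded by the new flags are
created with plain setsockopt() calls on the AF_XDP socket fd. The standalone
sketch below, with illustrative ring sizes, shows that repeating those calls
on the same fd fails because the rings already exist, which is exactly the
error the rx/tx_ring_setup_done bookkeeping now avoids on a retry:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef AF_XDP
#define AF_XDP 44
#endif
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

/* Create the kernel-side rx/tx descriptor rings for an AF_XDP socket,
 * mirroring the setsockopt() calls xsk_socket__create_shared() issues. */
static int setup_rings(int fd, int rx_entries, int tx_entries)
{
	if (setsockopt(fd, SOL_XDP, XDP_RX_RING,
		       &rx_entries, sizeof(rx_entries)))
		return -errno;
	if (setsockopt(fd, SOL_XDP, XDP_TX_RING,
		       &tx_entries, sizeof(tx_entries)))
		return -errno;
	return 0;
}

int main(void)
{
	int fd = socket(AF_XDP, SOCK_RAW, 0);
	int err;

	if (fd < 0)
		return 1;

	err = setup_rings(fd, 2048, 2048);
	if (err)
		fprintf(stderr, "first setup: %s\n", strerror(-err));

	/* Repeating the calls on the same fd fails: the rings already
	 * exist. This is the error a retried xsk_socket__create_shared()
	 * used to hit on the UMEM's fd before this commit. */
	err = setup_rings(fd, 2048, 2048);
	if (err)
		fprintf(stderr, "second setup: %s\n", strerror(-err));

	return 0;
}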