author     David S. Miller <davem@davemloft.net>  2018-09-26 05:25:00 +0200
committer  David S. Miller <davem@davemloft.net>  2018-09-26 05:25:00 +0200
commit     71f9b61c5b3f894fdc9dff9e511a961ada122862 (patch)
tree       8c8e92d19ae9c3bb3cbafcde380c36075b0f90c0 /net
parent     Merge branch 'Refactor-classifier-API-to-work-with-Qdisc-blocks-without-rtnl-... (diff)
parent     i40e: disallow changing the number of descriptors when AF_XDP is on (diff)
Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-09-25

This series contains updates to i40e and xsk.

Mariusz fixes an issue where the VF link state was not being updated properly when the PF is down or up. Also cleaned up the promiscuous configuration during a VF reset.

Patryk simplifies the code a bit to use the variables for PF and HW that are declared, rather than using the VSI pointers. Cleaned up the message length parameter to several virtchnl functions, since it was not being used (or needed).

Harshitha fixes two potential race conditions when trying to change VF settings by creating a helper function to validate that the VF is enabled and that the VSI is set up.

Sergey corrects a double "link down" message by putting in a check for whether the link is up or going down.

Björn addresses an AF_XDP zero-copy issue where buffers passed from userspace to the kernel were leaked when the hardware descriptor ring was torn down. A zero-copy capable driver picks buffers off the fill ring and places them on the hardware receive ring, to be completed at a later point when DMA is complete. It is similar on the transmit side: the driver picks buffers off the transmit ring and places them on the transmit hardware ring. In the typical flow, the receive buffer will be placed onto a receive ring (completed to the user), and the transmit buffer will be placed on the completion ring to notify the user that the transfer is done. However, if the driver needs to tear down the hardware rings for some reason (the interface goes down, reconfiguration, and such), the userspace buffers cannot be leaked; they have to be reused or completed back to userspace.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
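The xsk changes below add that missing piece: a small stash, the reuse queue, where a driver can park outstanding buffer handles across a teardown. As a rough illustration of the intended call pattern when enabling a UMEM on a ring, here is a minimal sketch modeled on the i40e changes in this series (my_drv_enable_umem() and num_rx_descs are hypothetical names, not driver code; the xsk_reuseq_*() helpers are the ones this merge adds, declared in include/net/xdp_sock.h):

/* A minimal sketch (not the literal i40e code) of how a zero-copy
 * driver might install a reuse queue when enabling a UMEM.
 */
static int my_drv_enable_umem(struct xdp_umem *umem, u32 num_rx_descs)
{
	struct xdp_umem_fq_reuse *reuseq;

	/* Size the stash to the hardware ring so that every in-flight
	 * buffer has a slot to be parked in on teardown.
	 */
	reuseq = xsk_reuseq_prepare(num_rx_descs);
	if (!reuseq)
		return -ENOMEM;

	/* Install the new queue and free whichever queue comes back:
	 * the previous one on success, or the new one if it was too
	 * small to hold the handles already stashed.
	 */
	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	return 0;
}

Freeing whatever xsk_reuseq_swap() returns covers both outcomes in one statement, which is why the swap function hands back the rejected queue rather than freeing it itself.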
Diffstat (limited to 'net')
-rw-r--r--  net/xdp/xdp_umem.c   |  2
-rw-r--r--  net/xdp/xsk_queue.c  | 55
-rw-r--r--  net/xdp/xsk_queue.h  |  3
3 files changed, 60 insertions, 0 deletions
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index b3b632c5aeae..555427b3e0fe 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -165,6 +165,8 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}
 
+	xsk_reuseq_destroy(umem);
+
 	xdp_umem_unpin_pages(umem);
 
 	task = get_pid_task(umem->pid, PIDTYPE_PID);
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index 2dc1384d9f27..b66504592d9b 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -3,7 +3,9 @@
  * Copyright(c) 2018 Intel Corporation.
  */
 
+#include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/overflow.h>
 
 #include "xsk_queue.h"
@@ -62,3 +64,56 @@ void xskq_destroy(struct xsk_queue *q)
 	page_frag_free(q->ring);
 	kfree(q);
 }
+
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+	struct xdp_umem_fq_reuse *newq;
+
+	/* Check for overflow */
+	if (nentries > (u32)roundup_pow_of_two(nentries))
+		return NULL;
+	nentries = roundup_pow_of_two(nentries);
+
+	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
+	if (!newq)
+		return NULL;
+	memset(newq, 0, offsetof(typeof(*newq), handles));
+
+	newq->nentries = nentries;
+	return newq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
+
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+					  struct xdp_umem_fq_reuse *newq)
+{
+	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
+
+	if (!oldq) {
+		umem->fq_reuse = newq;
+		return NULL;
+	}
+
+	if (newq->nentries < oldq->length)
+		return newq;
+
+	memcpy(newq->handles, oldq->handles,
+	       array_size(oldq->length, sizeof(u64)));
+	newq->length = oldq->length;
+
+	umem->fq_reuse = newq;
+	return oldq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
+
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+	kvfree(rq);
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_free);
+
+void xsk_reuseq_destroy(struct xdp_umem *umem)
+{
+	xsk_reuseq_free(umem->fq_reuse);
+	umem->fq_reuse = NULL;
+}
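Two details in xsk_reuseq_prepare() are worth spelling out. On a 64-bit kernel roundup_pow_of_two() returns an unsigned long, so for any nentries above 2^31 the rounded result truncates to a smaller u32 value and the guard refuses the allocation; struct_size() then computes the flexible-array allocation size with its own overflow checking, and the memset() clears only the header, since length starts at 0 and handles[] needs no initialization. A small userspace sketch of the overflow guard, with roundup_pow_of_two64() standing in for the kernel macro (builds with gcc or clang):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(): a u32 input above
 * 2^31 rounds up to 2^32, which does not fit back into a u32.
 */
static uint64_t roundup_pow_of_two64(uint64_t n)
{
	return n <= 1 ? 1 : 1ULL << (64 - __builtin_clzll(n - 1));
}

int main(void)
{
	uint32_t ok = 3000, bad = 0x80000001u;

	/* 3000 rounds up to 4096, so the guard passes (reject=0). */
	printf("%u -> reject=%d\n", ok,
	       ok > (uint32_t)roundup_pow_of_two64(ok));

	/* 0x80000001 rounds up to 2^32, which truncates to 0 as a u32;
	 * the input compares greater, so the request is refused (reject=1).
	 */
	printf("%u -> reject=%d\n", bad,
	       bad > (uint32_t)roundup_pow_of_two64(bad));
	return 0;
}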
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 82252cccb4e0..bcb5cbb40419 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -258,4 +258,7 @@ void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
 
+/* Executed by the core when the entire UMEM gets freed */
+void xsk_reuseq_destroy(struct xdp_umem *umem);
+
 #endif /* _LINUX_XSK_QUEUE_H */
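On the driver side, the ring teardown path is where the stash earns its keep: instead of dropping in-flight fill-ring buffers, the driver walks its receive ring and parks each outstanding handle. A minimal sketch, assuming the xsk_umem_fq_reuse() inline helper added alongside this patch in include/net/xdp_sock.h; the my_rx_buf and my_drv_clean_rx_ring names are hypothetical, not driver code:

/* Sketch of a driver-side Rx ring teardown under the reuse-queue scheme. */
struct my_rx_buf {
	void *addr;	/* CPU address, NULL when the slot is empty */
	u64 handle;	/* UMEM offset received via the fill ring */
};

static void my_drv_clean_rx_ring(struct xdp_umem *umem,
				 struct my_rx_buf *bufs, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		if (!bufs[i].addr)
			continue;

		/* Park the handle in the reuse queue instead of leaking
		 * it; it will be handed back to the hardware ring after
		 * reconfiguration completes.
		 */
		xsk_umem_fq_reuse(umem, bufs[i].handle);
		bufs[i].addr = NULL;
	}
}

Because xsk_reuseq_prepare() sizes the queue to the full ring, this loop can never overrun the stash, and when the whole UMEM is released instead, xdp_umem_release() simply frees the stash via xsk_reuseq_destroy(), as the first hunk above shows.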