summary | refs | log | tree | commit | diff | stats
path: root/include/net/xdp_sock.h
diff options
context:
space:
mode:
author    Magnus Karlsson <magnus.karlsson@intel.com>  2020-08-28 10:26:23 +0200
committer Daniel Borkmann <daniel@iogearbox.net>       2020-08-31 21:15:04 +0200
commit   8ef4e27eb3f03edfbfbe5657b8061f2a47757037 (patch)
tree     657f2adf5f8665231c2b98d02e59aaf4c14551b3 /include/net/xdp_sock.h
parent   xsk: Enable sharing of dma mappings (diff)
download linux-8ef4e27eb3f03edfbfbe5657b8061f2a47757037.tar.xz
linux-8ef4e27eb3f03edfbfbe5657b8061f2a47757037.zip
xsk: Rearrange internal structs for better performance
Rearrange the xdp_sock, xdp_umem and xsk_buff_pool structures so that they get smaller and align better to the cache lines. In the previous commits of this patch set, these structs have been reordered with the focus on functionality and simplicity, not performance. This patch improves throughput performance by around 3%.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-10-git-send-email-magnus.karlsson@intel.com
Diffstat (limited to 'include/net/xdp_sock.h')
-rw-r--r--  include/net/xdp_sock.h  13
1 file changed, 7 insertions, 6 deletions
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 282aeba0d20f..1a9559c0cbdd 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -23,13 +23,13 @@ struct xdp_umem {
u32 headroom;
u32 chunk_size;
u32 chunks;
+ u32 npgs;
struct user_struct *user;
refcount_t users;
- struct page **pgs;
- u32 npgs;
u8 flags;
- int id;
bool zc;
+ struct page **pgs;
+ int id;
struct list_head xsk_dma_list;
};
@@ -42,7 +42,7 @@ struct xsk_map {
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
- struct xsk_queue *rx;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
@@ -54,8 +54,7 @@ struct xdp_sock {
XSK_BOUND,
XSK_UNBOUND,
} state;
- /* Protects multiple processes in the control path */
- struct mutex mutex;
+
struct xsk_queue *tx ____cacheline_aligned_in_smp;
struct list_head tx_list;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
@@ -72,6 +71,8 @@ struct xdp_sock {
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};