author    Breno Leitao <leitao@debian.org>  2024-03-14 23:49:07 +0100
committer Paolo Abeni <pabeni@redhat.com>   2024-03-19 11:22:49 +0100
commit    db2952dfbdf1192df77df6869323d487390f5da6
tree      f945f32ce2f1d306d3d8c2a275bf8f6665e29106
parent    wireguard: receive: annotate data-race around receiving_counter.counter
wireguard: device: leverage core stats allocator
With commit 34d21de99cea9 ("net: Move {l,t,d}stats allocation to core and convert veth & vrf"), stats allocation could be done on net core instead of in this driver.

With this new approach, the driver doesn't have to bother with error handling (allocation failure checking, making sure free happens in the right spot, etc). This is core responsibility now.

Remove the allocation in this driver and leverage the network core allocation instead.

Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-rw-r--r--	drivers/net/wireguard/device.c	10
1 file changed, 2 insertions(+), 8 deletions(-)
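For context, here is a minimal sketch (not taken from this patch) of how a driver opts into the core-managed tstats that this change relies on. The foo_* names are hypothetical; pcpu_stat_type, NETDEV_PCPU_STAT_TSTATS, dev_sw_netstats_tx_add() and dev_get_tstats64() are the existing kernel interfaces.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical transmit path: account the packet in the per-CPU tstats
 * that the core allocated, then consume the skb. */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_sw_netstats_tx_add(dev, 1, skb->len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit  = foo_xmit,
	.ndo_get_stats64 = dev_get_tstats64,	/* sums dev->tstats */
};

/* Hypothetical ->setup() callback for an alloc_netdev()/rtnl_link_ops user. */
static void foo_setup(struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;
	/* Ask the core to allocate dev->tstats when the device is
	 * registered and to free it on teardown; the driver no longer
	 * calls netdev_alloc_pcpu_stats()/free_percpu() itself. */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

With pcpu_stat_type set, the core allocates dev->tstats at registration time and frees it when the device is torn down, which is why the error-unwind labels in the diff below can drop their free_percpu() calls.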
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index deb9636b0ecf..6aa071469e1c 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -262,7 +262,6 @@ static void wg_destruct(struct net_device *dev)
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
- free_percpu(dev->tstats);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
@@ -297,6 +296,7 @@ static void wg_setup(struct net_device *dev)
dev->hw_enc_features |= WG_NETDEV_FEATURES;
dev->mtu = ETH_DATA_LEN - overhead;
dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
SET_NETDEV_DEVTYPE(dev, &device_type);
@@ -331,14 +331,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (!wg->index_hashtable)
goto err_free_peer_hashtable;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- goto err_free_index_hashtable;
-
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
- goto err_free_tstats;
+ goto err_free_index_hashtable;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -397,8 +393,6 @@ err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
-err_free_tstats:
- free_percpu(dev->tstats);
err_free_index_hashtable:
kvfree(wg->index_hashtable);
err_free_peer_hashtable: