author	Jakub Kicinski <kuba@kernel.org>	2024-09-15 18:12:21 +0200
committer	Jakub Kicinski <kuba@kernel.org>	2024-09-15 18:13:19 +0200
commit	3561373114c8b3359114e2da27259317dc51145a (patch)
tree	3885239a7987bc96fc9eb3684f77ff4d9e42eef7 /net
parent	net/mlx5: HWS, check the correct variable in hws_send_ring_alloc_sq() (diff)
parent	netfilter: nft_socket: Fix a NULL vs IS_ERR() bug in nft_socket_cgroup_subtre... (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Merge in late fixes to prepare for the 6.12 net-next PR. No conflicts or
adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--	net/can/bcm.c	4
-rw-r--r--	net/ipv6/route.c	2
-rw-r--r--	net/ipv6/rpl_iptunnel.c	12
-rw-r--r--	net/netfilter/nft_socket.c	4
-rw-r--r--	net/tipc/bcast.c	2
-rw-r--r--	net/xdp/xsk_buff_pool.c	25
6 files changed, 32 insertions(+), 17 deletions(-)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 46d3ec3aa44b..217049fa496e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1471,8 +1471,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
#if IS_ENABLED(CONFIG_PROC_FS)
- if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
+ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+ bo->bcm_proc_read = NULL;
+ }
#endif
bo->bound = 0;
bo->ifindex = 0;
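
The hunk above clears bo->bcm_proc_read after removing the proc entry, so a
later teardown path cannot reach remove_proc_entry() a second time through a
stale pointer. A minimal userspace sketch of the same release-then-clear
pattern; struct owner and release_resource() are illustrative names, not from
the kernel tree:

#include <stdio.h>
#include <stdlib.h>

struct owner {
	char *resource;		/* plays the role of bo->bcm_proc_read */
};

static void release_resource(struct owner *o)
{
	if (o->resource) {
		free(o->resource);
		o->resource = NULL;	/* the fix: clear the handle after release */
	}
}

int main(void)
{
	struct owner o = { .resource = malloc(16) };

	release_resource(&o);	/* releases and clears */
	release_resource(&o);	/* harmless no-op instead of a double release */
	printf("resource after teardown: %p\n", (void *)o.resource);
	return 0;
}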
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7e7de29ad05f..b4251915585f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -174,7 +174,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
struct net_device *rt_dev = rt->dst.dev;
bool handled = false;
- if (rt_idev->dev == dev) {
+ if (rt_idev && rt_idev->dev == dev) {
rt->rt6i_idev = in6_dev_get(blackhole_netdev);
in6_dev_put(rt_idev);
handled = true;
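
The one-line guard above makes the comparison safe when an uncached route has
no inet6_dev attached: rt_idev is now tested for NULL before rt_idev->dev is
read. A small standalone sketch of the short-circuit guard; struct idev here
is illustrative, not the kernel's type:

#include <stdio.h>
#include <stdbool.h>

struct idev {
	int ifindex;
};

static bool device_matches(const struct idev *idev, int target)
{
	/* && short-circuits: idev->ifindex is read only when idev is
	 * non-NULL, mirroring the added "rt_idev &&" guard */
	return idev && idev->ifindex == target;
}

int main(void)
{
	struct idev eth0 = { .ifindex = 2 };

	printf("attached: %d, detached: %d\n",
	       device_matches(&eth0, 2), device_matches(NULL, 2));
	return 0;
}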
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 2c83b7586422..db3c19a42e1c 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -263,10 +263,8 @@ static int rpl_input(struct sk_buff *skb)
rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
err = rpl_do_srh(skb, rlwt);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
+ if (unlikely(err))
+ goto drop;
local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
@@ -286,9 +284,13 @@ static int rpl_input(struct sk_buff *skb)
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
- return err;
+ goto drop;
return dst_input(skb);
+
+drop:
+ kfree_skb(skb);
+ return err;
}
static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype,
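
This hunk funnels every failure in rpl_input() through a single drop: label,
which both removes the duplicated kfree_skb() call and fixes the
skb_cow_head() failure path, which previously returned without freeing the
skb. A minimal userspace sketch of the centralized-cleanup pattern; process(),
step_one() and step_two() are illustrative stand-ins:

#include <stdio.h>
#include <stdlib.h>

static int step_one(char *buf) { (void)buf; return 0; }
static int step_two(char *buf) { (void)buf; return -1; }	/* always fails here */

static int process(char *buf)
{
	int err;

	err = step_one(buf);
	if (err)
		goto drop;

	err = step_two(buf);
	if (err)
		goto drop;	/* the path that used to return without freeing */

	return 0;		/* on success the buffer is handed off downstream */

drop:
	free(buf);		/* single cleanup point, like kfree_skb(skb) */
	return err;
}

int main(void)
{
	printf("process: %d\n", process(malloc(64)));
	return 0;
}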
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index ac3c9e9cf0f3..f5da0c1775f2 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -61,8 +61,8 @@ static noinline int nft_socket_cgroup_subtree_level(void)
struct cgroup *cgrp = cgroup_get_from_path("/");
int level;
- if (!cgrp)
- return -ENOENT;
+ if (IS_ERR(cgrp))
+ return PTR_ERR(cgrp);
level = cgrp->level;
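
cgroup_get_from_path() reports failure with an ERR_PTR-encoded pointer, never
NULL, so the old "!cgrp" test could let an error pointer reach the
dereference below. The sketch uses simplified userspace versions of the
kernel's error-pointer helpers to show the difference; lookup() is an
illustrative stand-in:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified userspace versions of the kernel's error-pointer macros */
#define MAX_ERRNO	4095
#define ERR_PTR(e)	((void *)(intptr_t)(e))
#define PTR_ERR(p)	((long)(intptr_t)(p))
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

static void *lookup(const char *path)
{
	(void)path;
	return ERR_PTR(-ENOENT);	/* errors are encoded pointers, never NULL */
}

int main(void)
{
	void *cgrp = lookup("/");

	if (IS_ERR(cgrp)) {		/* "if (!cgrp)" would miss this entirely */
		printf("lookup failed: %ld\n", PTR_ERR(cgrp));
		return 1;
	}
	return 0;
}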
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 593846d25214..114fef65f92e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -320,8 +320,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
{
struct tipc_msg *hdr, *_hdr;
struct sk_buff_head tmpq;
+ u16 cong_link_cnt = 0;
struct sk_buff *_skb;
- u16 cong_link_cnt;
int rc = 0;
/* Is a cluster supporting with new capabilities ? */
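
The tipc change initializes cong_link_cnt at its declaration, since only some
paths through tipc_mcast_send_sync() write the counter before it is read. A
tiny standalone sketch of this bug class; maybe_count() is illustrative:

#include <stdio.h>
#include <stdbool.h>

static void maybe_count(unsigned short *cnt, bool congested)
{
	if (congested)
		(*cnt)++;	/* only some call paths ever write the counter */
}

int main(void)
{
	unsigned short cong_link_cnt = 0;	/* the fix: a known starting value */

	maybe_count(&cong_link_cnt, false);
	printf("congested links: %hu\n", cong_link_cnt);	/* defined either way */
	return 0;
}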
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 56edb98e5b47..521a2938e50a 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -628,20 +628,31 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
return nb_entries;
}
-u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ u32 max)
{
- u32 nb_entries1 = 0, nb_entries2;
+ int i;
- if (unlikely(pool->dev && dma_dev_need_sync(pool->dev))) {
+ for (i = 0; i < max; i++) {
struct xdp_buff *buff;
- /* Slow path */
buff = xp_alloc(pool);
- if (buff)
- *xdp = buff;
- return !!buff;
+ if (unlikely(!buff))
+ return i;
+ *xdp = buff;
+ xdp++;
}
+ return max;
+}
+
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ u32 nb_entries1 = 0, nb_entries2;
+
+ if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
+ return xp_alloc_slow(pool, xdp, max);
+
if (unlikely(pool->free_list_cnt)) {
nb_entries1 = xp_alloc_reused(pool, xdp, max);
if (nb_entries1 == max)
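
The refactor above pulls the dma-sync slow path out of xp_alloc_batch() into
its own helper and upgrades it from returning at most one buffer to filling
up to max entries, stopping early and reporting the partial count when
xp_alloc() fails. A standalone sketch of the fill-until-failure loop;
alloc_one() and alloc_slow() are illustrative stand-ins:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_one(void)
{
	static int budget = 3;		/* pretend the pool runs dry after 3 */
	return budget-- > 0 ? malloc(16) : NULL;
}

static unsigned int alloc_slow(void **out, unsigned int max)
{
	unsigned int i;

	for (i = 0; i < max; i++) {
		void *buf = alloc_one();

		if (!buf)
			return i;	/* partial batch: report what was filled */
		out[i] = buf;
	}
	return max;
}

int main(void)
{
	void *batch[8];
	unsigned int n = alloc_slow(batch, 8);

	printf("allocated %u of 8\n", n);	/* 3 with the stub above */
	while (n)
		free(batch[--n]);
	return 0;
}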