author	David S. Miller <davem@davemloft.net>	2020-04-10 02:39:22 +0200
committer	David S. Miller <davem@davemloft.net>	2020-04-10 02:39:22 +0200
commit	40fc7ad2c8863479f3db34f9a9283b4884cd0e90 (patch)
tree	20d697f61bc6108e7230d629b2cf85ff0e2b094f /net
parent	net: ipv4: devinet: Fix crash when add/del multicast IP with autojoin (diff)
parent	arm, bpf: Fix bugs with ALU64 {RSH, ARSH} BPF_K shift by 0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2020-04-10

The following pull-request contains BPF updates for your *net* tree.

We've added 13 non-merge commits during the last 7 day(s) which contain
a total of 13 files changed, 137 insertions(+), 43 deletions(-).

The main changes are:

1) JIT code emission fixes for riscv and arm32, from Luke Nelson and Xi Wang.

2) Disable vmlinux BTF info if GCC_PLUGIN_RANDSTRUCT is used, from Slava Bacherikov.

3) Fix oob write in AF_XDP when meta data is used, from Li RongQing.

4) Fix bpf_get_link_xdp_id() handling on single prog when flags are specified, from Andrey Ignatov.

5) Fix sk_assign() BPF helper for request sockets that can have sk_reuseport field uninitialized, from Joe Stringer.

6) Fix mprotect() test case for the BPF LSM, from KP Singh.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/core/filter.c	2
-rw-r--r--	net/core/sock.c	2
-rw-r--r--	net/xdp/xsk.c	5
3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 7628b947dbc3..7d6ceaa54d21 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5925,7 +5925,7 @@ BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
 		return -EOPNOTSUPP;
 	if (unlikely(dev_net(skb->dev) != sock_net(sk)))
 		return -ENETUNREACH;
-	if (unlikely(sk->sk_reuseport))
+	if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
 		return -ESOCKTNOSUPPORT;
 	if (sk_is_refcounted(sk) &&
 	    unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
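
Context for the new guard (editorial note, not part of the diff): sk_assign() may be handed TIME_WAIT or NEW_SYN_RECV minisockets, which only populate the struct sock_common prefix; sk_reuseport lives past that prefix and is uninitialized on them, so it must only be read on full sockets. For reference, sk_fullsock() in include/net/sock.h reads roughly as follows at the time of this merge:

/* A socket is "full" unless it is a TIME_WAIT or NEW_SYN_RECV
 * minisocket, which only provides the struct sock_common prefix.
 */
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}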
diff --git a/net/core/sock.c b/net/core/sock.c
index ce1d8dce9b7a..90509c37d291 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1872,7 +1872,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		 * as not suitable for copying when cloning.
 		 */
 		if (sk_user_data_is_nocopy(newsk))
-			RCU_INIT_POINTER(newsk->sk_user_data, NULL);
+			newsk->sk_user_data = NULL;
 
 		newsk->sk_err	   = 0;
 		newsk->sk_err_soft = 0;
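
Aside (editorial, not from the commit message): the clearing condition still comes from sk_user_data_is_nocopy(), which keeps the SK_USER_DATA_NOCOPY flag in the low bit of the sk_user_data pointer. A minimal userspace sketch of that pointer-tagging technique, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define NOCOPY_FLAG 0x1UL             /* flag kept in the pointer's low bit */
#define PTR_MASK    (~(uintptr_t)0x1) /* strip the flag before dereferencing */

/* Tag a (suitably aligned) pointer with the no-copy flag. */
static void *tag_nocopy(void *p)
{
	return (void *)((uintptr_t)p | NOCOPY_FLAG);
}

static int is_nocopy(const void *p)
{
	return (uintptr_t)p & NOCOPY_FLAG;
}

int main(void)
{
	static int data = 42;
	void *user_data = tag_nocopy(&data);

	if (is_nocopy(user_data))  /* analogous to sk_user_data_is_nocopy() */
		user_data = NULL;  /* analogous to the clone-time clearing */
	printf("user_data after clone: %p\n", user_data);
	return 0;
}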
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 356f90e4522b..c350108aa38d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -131,8 +131,9 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
 		u64 page_start = addr & ~(PAGE_SIZE - 1);
 		u64 first_len = PAGE_SIZE - (addr - page_start);
 
-		memcpy(to_buf, from_buf, first_len + metalen);
-		memcpy(next_pg_addr, from_buf + first_len, len - first_len);
+		memcpy(to_buf, from_buf, first_len);
+		memcpy(next_pg_addr, from_buf + first_len,
+		       len + metalen - first_len);
 
 		return;
 	}
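
Worked example of the arithmetic (illustrative values, not from the commit): __xsk_rcv_memcpy() must place len + metalen bytes starting at addr, and when that span crosses a page boundary only first_len bytes fit in the first page. The old code copied first_len + metalen bytes there, overrunning the page by metalen; the fix stops the first copy exactly at the boundary and moves the remainder to the next page. A standalone sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Illustrative values: copy starts 96 bytes before a page boundary. */
	unsigned long addr = 4000, len = 200, metalen = 32;
	unsigned long page_start = addr & ~(PAGE_SIZE - 1);
	unsigned long first_len = PAGE_SIZE - (addr - page_start);

	/* Buggy split: first chunk overruns the page by 'metalen' bytes. */
	printf("old: first page gets %lu bytes, ending at offset %lu (> %lu)\n",
	       first_len + metalen, addr + first_len + metalen, PAGE_SIZE);

	/* Fixed split: first chunk stops exactly at the boundary; the
	 * remainder (len + metalen - first_len) lands in the next page.
	 */
	printf("new: %lu bytes, then %lu bytes; total %lu = len + metalen\n",
	       first_len, len + metalen - first_len, len + metalen);
	return 0;
}

With these numbers, first_len is 96, so the buggy first memcpy() wrote 128 bytes ending 32 bytes past the page, while the fixed version writes 96 then 136 bytes, the same 232-byte total with no overrun.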