author		John Fastabend <john.fastabend@gmail.com>	2020-10-09 20:36:16 +0200
committer	Alexei Starovoitov <ast@kernel.org>	2020-10-12 03:00:57 +0200
commit		cfea28f890cf292d5fe90680db64b68086ef25ba (patch)
tree		c366a50c034eb3efd5cd2185a41dbed0907a0186 /net
parent		bpf: Migrate from patchwork.ozlabs.org to patchwork.kernel.org. (diff)
download	linux-cfea28f890cf292d5fe90680db64b68086ef25ba.tar.xz
		linux-cfea28f890cf292d5fe90680db64b68086ef25ba.zip
bpf, sockmap: Skb verdict SK_PASS to self already checked rmem limits
For the sk_skb case, where the skb_verdict program returns SK_PASS to continue passing the packet up the stack, the memory limits were already checked before enqueuing in skb_queue_tail from the TCP side. So let's remove the extra checks here. The theory is: if the TCP stack believes we have memory to receive the packet, then let's trust the stack and not double-check the limits. In fact, the accounting here can cause a drop if sk_rmem_alloc has increased after the stack accepted this packet, but before the duplicate check here. And worse, if this happens there is no retransmit, because the TCP stack already believes the data has been received.

Fixes: 51199405f9672 ("bpf: skb_verdict, support SK_PASS on RX BPF path")
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/160226857664.5692.668205469388498375.stgit@john-Precision-5820-Tower
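To make the race concrete, here is a simplified sketch of the pre-patch SK_PASS handling (condensed from the hunk removed below; not verbatim kernel source). By the time the verdict program runs, the TCP receive path has already accepted the skb and charged it to sk_rmem_alloc, so the second limit check can only cause harm:

	/* Pre-patch: the skb was already admitted and charged to
	 * sk_rmem_alloc by the TCP stack. If other data arrived in the
	 * meantime and pushed sk_rmem_alloc past sk_rcvbuf, this
	 * duplicate check frees the skb, and because TCP already
	 * considers the data received, it is never retransmitted.
	 */
	if (atomic_read(&sk_other->sk_rmem_alloc) <= sk_other->sk_rcvbuf) {
		struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);

		tcp->bpf.flags |= BPF_F_INGRESS;
		skb_queue_tail(&psock->ingress_skb, skb);
		schedule_work(&psock->work);
	} else {
		goto out_free;		/* silent data loss */
	}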
Diffstat (limited to 'net')
-rw-r--r--	net/core/skmsg.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 4b5f7c8fecd1..040ae1d75b65 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -771,6 +771,7 @@ EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 static void sk_psock_verdict_apply(struct sk_psock *psock,
				    struct sk_buff *skb, int verdict)
 {
+	struct tcp_skb_cb *tcp;
 	struct sock *sk_other;
 
 	switch (verdict) {
@@ -780,16 +781,12 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}
-		if (atomic_read(&sk_other->sk_rmem_alloc) <=
-		    sk_other->sk_rcvbuf) {
-			struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
 
-			tcp->bpf.flags |= BPF_F_INGRESS;
-			skb_queue_tail(&psock->ingress_skb, skb);
-			schedule_work(&psock->work);
-			break;
-		}
-		goto out_free;
+		tcp = TCP_SKB_CB(skb);
+		tcp->bpf.flags |= BPF_F_INGRESS;
+		skb_queue_tail(&psock->ingress_skb, skb);
+		schedule_work(&psock->work);
+		break;
 	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
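For reference, the SK_PASS branch of sk_psock_verdict_apply() after this patch reads roughly as below. The first two lines of the branch (the sk_other assignment and the SOCK_DEAD check) are not visible in the excerpt above and are filled in from the surrounding upstream source, so treat this as a sketch rather than a verbatim quote:

	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		/* Trust the TCP stack's earlier rmem accounting; queue the
		 * skb for the psock work item without a second limit check.
		 */
		tcp = TCP_SKB_CB(skb);
		tcp->bpf.flags |= BPF_F_INGRESS;
		skb_queue_tail(&psock->ingress_skb, skb);
		schedule_work(&psock->work);
		break;

Note that the tcp pointer is now declared at the top of the function (first hunk), since its scope is no longer confined to the removed if block.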