author | Ursula Braun <ubraun@linux.vnet.ibm.com> | 2016-10-07 15:51:48 +0200
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2016-10-12 07:56:04 +0200
commit | 8c68b1a0e9fa288015f1492e1b342db159734da9 (patch) |
tree | e4debe75e45f90af24e3cefe45bcf6435b5347a8 /net/iucv |
parent | af_iucv: enable control sends in case of SEND_SHUTDOWN (diff) |
Subject: [PATCH] af_iucv: drop skbs rejected by filter
A packet filter might be installed, for instance, with setsockopt
SO_ATTACH_FILTER. af_iucv currently queues skbs rejected by the
filter into the backlog queue. This does not make sense, since
packets rejected by the filter can be dropped immediately. This
patch adds a separate check of the sk_filter() return code and
drops such packets instead of queueing them.
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Reported-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
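For context, such a filter can be attached from user space with SO_ATTACH_FILTER. Below is a minimal illustrative sketch, not part of the patch: it installs a classic BPF program consisting of a single BPF_RET | BPF_K statement with k == 0, i.e. "accept 0 bytes", which rejects every packet, on an AF_IUCV socket. It assumes an s390 host where af_iucv is available (the socket() call fails elsewhere) and trims error handling.

```c
/*
 * Illustrative sketch (not from the patch): attach a reject-everything
 * classic BPF filter to an AF_IUCV socket via SO_ATTACH_FILTER.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
	/* BPF_RET | BPF_K with k == 0: accept 0 bytes, i.e. drop all */
	struct sock_filter code[] = {
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_fprog prog = {
		.len    = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket(AF_IUCV)");	/* expected off s390 */
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		       &prog, sizeof(prog)) < 0) {
		perror("setsockopt(SO_ATTACH_FILTER)");
		return 1;
	}
	return 0;
}
```

With such a filter installed, the patched kernel frees rejected skbs immediately and increments sk->sk_drops, instead of parking them on the backlog queue.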
Diffstat (limited to 'net/iucv')
-rw-r--r-- | net/iucv/af_iucv.c | 24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 88d0eaa18749..cfb9e5f4e28f 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1323,8 +1323,13 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 	}
 
 	IUCV_SKB_CB(skb)->offset = 0;
-	if (sock_queue_rcv_skb(sk, skb))
-		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
+	if (sk_filter(sk, skb)) {
+		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
+		kfree_skb(skb);
+		return;
+	}
+	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
+		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
 }
 
 /* iucv_process_message_q() - Process outstanding IUCV messages
@@ -1438,13 +1443,13 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 		rskb = skb_dequeue(&iucv->backlog_skb_q);
 		while (rskb) {
 			IUCV_SKB_CB(rskb)->offset = 0;
-			if (sock_queue_rcv_skb(sk, rskb)) {
+			if (__sock_queue_rcv_skb(sk, rskb)) {
+				/* handle rcv queue full */
 				skb_queue_head(&iucv->backlog_skb_q,
 					       rskb);
 				break;
-			} else {
-				rskb = skb_dequeue(&iucv->backlog_skb_q);
 			}
+			rskb = skb_dequeue(&iucv->backlog_skb_q);
 		}
 		if (skb_queue_empty(&iucv->backlog_skb_q)) {
 			if (!list_empty(&iucv->message_q.list))
@@ -2124,12 +2129,17 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
 	IUCV_SKB_CB(skb)->offset = 0;
+	if (sk_filter(sk, skb)) {
+		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+
 	spin_lock(&iucv->message_q.lock);
 	if (skb_queue_empty(&iucv->backlog_skb_q)) {
-		if (sock_queue_rcv_skb(sk, skb)) {
+		if (__sock_queue_rcv_skb(sk, skb))
 			/* handle rcv queue full */
 			skb_queue_tail(&iucv->backlog_skb_q, skb);
-		}
 	} else
 		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
 	spin_unlock(&iucv->message_q.lock);
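A note on the helper switch: sock_queue_rcv_skb() itself runs sk_filter() before queueing. Since the patch now calls sk_filter() explicitly, so that a filter rejection (free the skb, bump sk->sk_drops) can be told apart from a full receive queue (park the skb on the backlog queue), it queues with __sock_queue_rcv_skb() and thereby skips a redundant second filter pass. The sketch below is a user-space toy model of that decision, with hypothetical stand-ins (filter_rejects(), rcv_queue_full) for the kernel helpers; it is not the kernel code itself.

```c
/*
 * Toy model of the receive-path decision introduced by the patch.
 * filter_rejects() and rcv_queue_full are hypothetical stand-ins for
 * sk_filter() and a failing __sock_queue_rcv_skb().
 */
#include <stdbool.h>
#include <stdio.h>

static bool rcv_queue_full;		/* pretend state of sk_receive_queue */
static int queued, backlogged, drops;	/* counters instead of real queues */

/* Stand-in for sk_filter(): reject packets starting with 'x'. */
static bool filter_rejects(const char *pkt)
{
	return pkt[0] == 'x';
}

static void process_rx(const char *pkt)
{
	if (filter_rejects(pkt)) {
		drops++;	/* kernel: atomic_inc(&sk->sk_drops); kfree_skb() */
		return;		/* rejected skbs never reach the backlog */
	}
	if (rcv_queue_full)
		backlogged++;	/* kernel: skb_queue_tail(&backlog_skb_q, skb) */
	else
		queued++;	/* kernel: __sock_queue_rcv_skb() succeeded */
}

int main(void)
{
	process_rx("hello");	/* accepted and queued */
	process_rx("xdrop");	/* rejected by the "filter": dropped */
	rcv_queue_full = true;
	process_rx("hello");	/* accepted but backlogged */
	printf("queued=%d backlogged=%d drops=%d\n",
	       queued, backlogged, drops);	/* queued=1 backlogged=1 drops=1 */
	return 0;
}
```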