author     Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>   2016-04-29 19:17:08 +0200
committer  David S. Miller <davem@davemloft.net>                 2016-05-02 03:06:10 +0200
commit     0970f5b3665933f5f0d069607c78fb10bd918b62 (patch)
tree       d9dac95d765c7337d9e32cbd0cc2013aba5bb74f
parent     mdio_bus: Fix MDIO bus scanning in __mdiobus_register() (diff)
sctp: signal sk_data_ready earlier on data chunks reception
Dave Miller pointed out that fb586f25300f ("sctp: delay calls to
sk_data_ready() as much as possible") may insert latency, especially if
the receiving application is running on another CPU, and that it would
be better if we signalled as early as possible.

This patch thus basically inverts the logic on fb586f25300f and signals
it as early as possible, similar to what we had before.

Fixes: fb586f25300f ("sctp: delay calls to sk_data_ready() as much as possible")
Reported-by: Dave Miller <davem@davemloft.net>
Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
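In short, fb586f25300f set a flag when an event was queued and issued a
single wakeup at the end of side-effect processing; this patch issues the
wakeup at queue time and uses the (renamed) flag to suppress duplicate
wakeups instead. A rough sketch of the two orderings, distilled from the
diff below (illustrative only: process_chunks() is an invented stand-in,
not a kernel function):

	/* Before (fb586f25300f): wakeup deferred until the whole
	 * packet has been processed.
	 */
	process_chunks(asoc);           /* queues events, sets flag */
	if (sp->pending_data_ready) {
		sk->sk_data_ready(sk);  /* receiver wakes up late */
		sp->pending_data_ready = 0;
	}

	/* After (this patch): wakeup fires as soon as the first event
	 * reaches the receive queue; the flag only prevents
	 * re-signalling for later events from the same packet.
	 */
	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);  /* receiver can start reading now */
	}

The flag is then cleared once at the end of side-effect processing in
net/sctp/sm_sideeffect.c, so the next packet can signal again.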
-rw-r--r--  include/net/sctp/structs.h |  2 +-
-rw-r--r--  net/sctp/sm_sideeffect.c   |  7 +++----
-rw-r--r--  net/sctp/ulpqueue.c        | 25 ++++++++++++++++---------
3 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 558bae3cbe0d..16b013a6191c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -218,7 +218,7 @@ struct sctp_sock {
 		frag_interleave:1,
 		recvrcvinfo:1,
 		recvnxtinfo:1,
-		pending_data_ready:1;
+		data_ready_signalled:1;
 
 	atomic_t pd_mode;
 	/* Receive to here while partial delivery is in effect. */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e8f0112f9b28..aa3712259368 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1741,10 +1741,9 @@ out:
 	} else if (local_cork)
 		error = sctp_outq_uncork(&asoc->outqueue, gfp);
 
-	if (sp->pending_data_ready) {
-		sk->sk_data_ready(sk);
-		sp->pending_data_ready = 0;
-	}
+	if (sp->data_ready_signalled)
+		sp->data_ready_signalled = 0;
+
 	return error;
 nomem:
 	error = -ENOMEM;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ec12a8920e5f..ec166d2bd2d9 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -194,6 +194,7 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
+	struct sctp_sock *sp = sctp_sk(sk);
 	struct sk_buff_head *queue, *skb_list;
 	struct sk_buff *skb = sctp_event2skb(event);
 	int clear_pd = 0;
@@ -211,7 +212,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		sk_incoming_cpu_update(sk);
 	}
 	/* Check if the user wishes to receive this event. */
-	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
+	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
 		goto out_free;
 
 	/* If we are in partial delivery mode, post to the lobby until
@@ -219,7 +220,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	 * the association the cause of the partial delivery.
 	 */
 
-	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
+	if (atomic_read(&sp->pd_mode) == 0) {
 		queue = &sk->sk_receive_queue;
 	} else {
 		if (ulpq->pd_mode) {
@@ -231,7 +232,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		if ((event->msg_flags & MSG_NOTIFICATION) ||
 		    (SCTP_DATA_NOT_FRAG ==
 			    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
-			queue = &sctp_sk(sk)->pd_lobby;
+			queue = &sp->pd_lobby;
 		else {
 			clear_pd = event->msg_flags & MSG_EOR;
 			queue = &sk->sk_receive_queue;
 		}
@@ -242,10 +243,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		 * can queue this to the receive queue instead
 		 * of the lobby.
 		 */
-		if (sctp_sk(sk)->frag_interleave)
+		if (sp->frag_interleave)
 			queue = &sk->sk_receive_queue;
 		else
-			queue = &sctp_sk(sk)->pd_lobby;
+			queue = &sp->pd_lobby;
 		}
 	}
 
@@ -264,8 +265,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	if (clear_pd)
 		sctp_ulpq_clear_pd(ulpq);
 
-	if (queue == &sk->sk_receive_queue)
-		sctp_sk(sk)->pending_data_ready = 1;
+	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
+		sk->sk_data_ready(sk);
+	}
 	return 1;
 
 out_free:
@@ -1126,11 +1129,13 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
 	struct sctp_ulpevent *ev = NULL;
 	struct sock *sk;
+	struct sctp_sock *sp;
 
 	if (!ulpq->pd_mode)
 		return;
 
 	sk = ulpq->asoc->base.sk;
+	sp = sctp_sk(sk);
 	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
 				       &sctp_sk(sk)->subscribe))
 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
@@ -1140,6 +1145,8 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
 	/* If there is data waiting, send it up the socket now. */
-	if (sctp_ulpq_clear_pd(ulpq) || ev)
-		sctp_sk(sk)->pending_data_ready = 1;
+	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
+		sk->sk_data_ready(sk);
+	}
 }