author     Eric Dumazet <edumazet@google.com>     2016-12-03 20:14:53 +0100
committer  David S. Miller <davem@davemloft.net>  2016-12-05 19:32:23 +0100
commit     a9b204d1564702b704ad6fe74f10a102c7b87ba3
tree       44251778743174bb4e68c70225b19397f6c37c48
parent     tcp: tsq: add shortcut in tcp_tasklet_func()
tcp: tsq: avoid one atomic in tcp_wfree()
Under high load, tcp_wfree() performs an atomic operation to
schedule a tasklet over and over.
We need to schedule it only if our per-cpu list was empty.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  net/ipv4/tcp_output.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fa23b688a6f3..0db63efe5b8b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -880,6 +880,7 @@ void tcp_wfree(struct sk_buff *skb)
 
 	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
+		bool empty;
 
 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
 			goto out;
@@ -892,8 +893,10 @@ void tcp_wfree(struct sk_buff *skb)
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
 		tsq = this_cpu_ptr(&tsq_tasklet);
+		empty = list_empty(&tsq->head);
 		list_add(&tp->tsq_node, &tsq->head);
-		tasklet_schedule(&tsq->tasklet);
+		if (empty)
+			tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
 		return;
 	}
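
For illustration, a minimal userspace C sketch of the pattern this patch applies: kick the worker only on the empty -> non-empty transition of the queue, because the worker drains the whole list in one run. All names here (work_queue, kick_worker, enqueue, drain) are hypothetical stand-ins; in the kernel the queue is the per-cpu tsq_tasklet list, the kick is tasklet_schedule() (an atomic test-and-set plus possible softirq raise), and the producer runs with IRQs disabled, which is what makes the check-then-add safe.

/* Sketch only: a single-threaded analogue of the empty -> non-empty
 * scheduling optimization. Not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

struct work_queue {
	struct node *head;	/* pending items, like the per-cpu tsq list */
	int kicks;		/* times the worker was scheduled */
};

/* Stand-in for tasklet_schedule(): in the kernel this is an atomic
 * operation, so skipping redundant calls matters under high load. */
static void kick_worker(struct work_queue *q)
{
	q->kicks++;
}

/* Producer side, mirroring tcp_wfree(): only the enqueue that makes
 * the list non-empty schedules the worker; later enqueues rely on the
 * already-pending kick. */
static void enqueue(struct work_queue *q, struct node *n)
{
	bool empty = (q->head == NULL);

	n->next = q->head;
	q->head = n;
	if (empty)
		kick_worker(q);
}

/* Consumer side, mirroring tcp_tasklet_func(): drain everything that
 * accumulated since the single kick. */
static void drain(struct work_queue *q)
{
	struct node *n = q->head;

	q->head = NULL;
	for (; n; n = n->next)
		printf("processing %d\n", n->val);
}

int main(void)
{
	struct work_queue q = { .head = NULL, .kicks = 0 };
	struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };

	enqueue(&q, &a);	/* empty -> non-empty: kicks the worker */
	enqueue(&q, &b);	/* already non-empty: no kick */
	enqueue(&q, &c);	/* already non-empty: no kick */
	drain(&q);
	printf("worker kicked %d time(s) for 3 items\n", q.kicks);
	return 0;
}

Under a burst of frees, this turns one atomic per freed skb into one atomic per batch, which is the saving the commit title refers to.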