author     Dave Watson <davejwatson@fb.com>       2018-05-01 22:05:39 +0200
committer  David S. Miller <davem@davemloft.net>  2018-05-02 00:57:52 +0200
commit     c212d2c7fc4736d49be102fb7a1a545cdc2f1fea (patch)
tree       f4ebbd4af00a32f8f96ea2d92498c533d33da615 /net
parent     ipv6: Allow non-gateway ECMP for IPv6 (diff)
net/tls: Don't recursively call push_record during tls_write_space callbacks
It is reported that in some cases, write_space may be called in
do_tcp_sendpages, such that we recursively invoke do_tcp_sendpages again:

[  660.468802]  ? do_tcp_sendpages+0x8d/0x580
[  660.468826]  ? tls_push_sg+0x74/0x130 [tls]
[  660.468852]  ? tls_push_record+0x24a/0x390 [tls]
[  660.468880]  ? tls_write_space+0x6a/0x80 [tls]
...

tls_push_sg already does a loop over all sending sg's, so ignore
any tls_write_space notifications until we are done sending.
We then have to call the previous write_space to wake up
poll() waiters after we are done with the send loop.

Reported-by: Andre Tomt <andre@tomt.net>
Signed-off-by: Dave Watson <davejwatson@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
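A minimal user-space sketch of the re-entrancy guard this commit adds, for illustration only: the in_tcp_sendpages flag and the saved sk_write_space callback mirror the diff below, while the fake context, the fake send loop and the printf()s are invented stand-ins for the kernel's socket machinery.

#include <stdbool.h>
#include <stdio.h>

struct fake_ctx {
	bool in_tcp_sendpages;                     /* guard: set while the send loop runs */
	void (*sk_write_space)(struct fake_ctx *); /* saved "previous" write_space callback */
};

static void tls_write_space_sketch(struct fake_ctx *ctx);

/* Stand-in for do_tcp_sendpages(): pretend TCP frees write space mid-send
 * and fires the write_space notification, which used to recurse back into
 * the TLS send path. */
static void fake_tcp_sendpage(struct fake_ctx *ctx)
{
	tls_write_space_sketch(ctx);
}

/* Stand-in for tls_push_sg(): set the guard, loop over the sg entries,
 * clear the guard, then call the previous write_space to wake poll() waiters. */
static void push_sg_sketch(struct fake_ctx *ctx)
{
	ctx->in_tcp_sendpages = true;

	for (int i = 0; i < 3; i++)
		fake_tcp_sendpage(ctx);

	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(ctx);
}

/* Stand-in for tls_write_space(): ignore notifications that arrive while
 * we are already inside the send loop. */
static void tls_write_space_sketch(struct fake_ctx *ctx)
{
	if (ctx->in_tcp_sendpages) {
		printf("write_space ignored: already sending\n");
		return;
	}
	printf("write_space: would push the pending closed record\n");
}

/* Stand-in for the socket's original write_space callback. */
static void original_write_space(struct fake_ctx *ctx)
{
	(void)ctx;
	printf("original write_space: wake up poll() waiters\n");
}

int main(void)
{
	struct fake_ctx ctx = {
		.in_tcp_sendpages = false,
		.sk_write_space = original_write_space,
	};

	push_sg_sketch(&ctx);
	return 0;
}

Running the sketch prints three ignored notifications followed by a single wake-up, which is the ordering the commit message describes: no recursion during the send loop, one call to the previous write_space afterwards.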
Diffstat (limited to 'net')
-rw-r--r--   net/tls/tls_main.c   7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0d379970960e..cc03e00785c7 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -114,6 +114,7 @@ int tls_push_sg(struct sock *sk,
 	size = sg->length - offset;
 	offset += sg->offset;
 
+	ctx->in_tcp_sendpages = true;
 	while (1) {
 		if (sg_is_last(sg))
 			sendpage_flags = flags;
@@ -148,6 +149,8 @@ retry:
 	}
 
 	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+	ctx->in_tcp_sendpages = false;
+	ctx->sk_write_space(sk);
 
 	return 0;
 }
@@ -217,6 +220,10 @@ static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
 
+	/* We are already sending pages, ignore notification */
+	if (ctx->in_tcp_sendpages)
+		return;
+
 	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
 		gfp_t sk_allocation = sk->sk_allocation;
 		int rc;
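The diffstat above is limited to net/, so the declaration of the new flag does not appear here; presumably the full patch also extends struct tls_context (in include/net/tls.h) roughly as in this hedged sketch, where only the field names are taken from the diff and everything else is illustrative:

/* Sketch only: placement in include/net/tls.h is an assumption, and the
 * struct name here is hypothetical. */
#include <stdbool.h>

struct sock;	/* opaque here; the real definition lives in the kernel headers */

struct tls_context_sketch {
	void (*sk_write_space)(struct sock *sk);	/* previously saved write_space callback */
	bool in_tcp_sendpages;				/* new flag: set while tls_push_sg() is sending */
};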