author		Jakub Kicinski <kuba@kernel.org>	2022-07-06 01:59:26 +0200
committer	David S. Miller <davem@davemloft.net>	2022-07-06 13:56:35 +0200
commit		c46b01839f7aad5889e23505bbfbeb5f4d7fde8e (patch)
tree		c3759309c9c88b17a1f87d0bcbb4b939491e2ef7 /net/tls/tls_sw.c
parent		selftests: tls: add selftest variant for pad (diff)
tls: rx: periodically flush socket backlog
We continuously hold the socket lock during large reads and writes.
This may inflate RTT and negatively impact TCP performance.
Flush the backlog periodically. I tried to pick a flush period (128kB)
which gives significant benefit but the max Bps rate is not yet
visibly impacted.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
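The helper added below is easier to follow with its early-return test read in the positive form: flush once 128kB has been decrypted since the last flush, or once the TCP receive queue holds no more than one full record, so processing the backlog is what feeds the strparser next (a separate guard skips the flush when the read is about to complete anyway). A minimal user-space sketch of that predicate, assuming hypothetical names (should_flush, queued standing in for tcp_inq(sk)) and a guessed 29-byte per-record overhead; illustrative only, not kernel code:

/* Hedged model of the flush predicate in tls_read_flush_backlog();
 * SZ_128K and TLS_MAX_PAYLOAD_SIZE mirror the kernel constants. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define SZ_128K			(128 * 1024)
#define TLS_MAX_PAYLOAD_SIZE	(1 << 14)	/* 16kB of plaintext per record */

static bool should_flush(size_t done, size_t flushed_at,
			 size_t queued, size_t max_rec)
{
	/* Inverse (De Morgan) of the kernel's early return: flush when
	 * the 128kB budget is spent, or when less than one full record
	 * is left queued. */
	return done - flushed_at >= SZ_128K || queued <= max_rec;
}

int main(void)
{
	/* 29 bytes is an assumed per-record overhead, for illustration. */
	size_t max_rec = TLS_MAX_PAYLOAD_SIZE + 29;

	assert(should_flush(SZ_128K, 0, 1 << 20, max_rec));	/* period elapsed */
	assert(should_flush(4096, 0, max_rec, max_rec));	/* queue drained */
	assert(!should_flush(4096, 0, 1 << 20, max_rec));	/* neither: skip */
	return 0;
}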
Diffstat (limited to 'net/tls/tls_sw.c')
-rw-r--r--	net/tls/tls_sw.c	23	+++++++++++++++++++++++
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 7592b6519953..79043bc3da39 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1738,6 +1738,24 @@ out:
 	return copied ? : err;
 }
 
+static void
+tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
+		       size_t len_left, size_t decrypted, ssize_t done,
+		       size_t *flushed_at)
+{
+	size_t max_rec;
+
+	if (len_left <= decrypted)
+		return;
+
+	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
+	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
+		return;
+
+	*flushed_at = done;
+	sk_flush_backlog(sk);
+}
+
 int tls_sw_recvmsg(struct sock *sk,
 		   struct msghdr *msg,
 		   size_t len,
@@ -1750,6 +1768,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	struct sk_psock *psock;
 	unsigned char control = 0;
 	ssize_t decrypted = 0;
+	size_t flushed_at = 0;
 	struct strp_msg *rxm;
 	struct tls_msg *tlm;
 	struct sk_buff *skb;
@@ -1839,6 +1858,10 @@ int tls_sw_recvmsg(struct sock *sk,
 		if (err <= 0)
 			goto recv_end;
 
+		/* periodically flush backlog, and feed strparser */
+		tls_read_flush_backlog(sk, prot, len, to_decrypt,
+				       decrypted + copied, &flushed_at);
+
 		ctx->recv_pkt = NULL;
 		__strp_unpause(&ctx->strp);
 		__skb_queue_tail(&ctx->rx_list, skb);
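For a sense of the cadence this produces during one large recvmsg() call, a small self-contained simulation (hypothetical user-space harness, not kernel code; flush_backlog and maybe_flush are illustrative stand-ins) that pushes full 16kB records through the same 128kB budget:

/* Hypothetical harness: simulate the flush cadence of one large read
 * decrypting back-to-back 16kB TLS records. */
#include <stdio.h>
#include <stddef.h>

#define SZ_128K		(128 * 1024)
#define RECORD_SIZE	(16 * 1024)	/* one full record of plaintext */

/* Stand-in for sk_flush_backlog(): just report where the flush lands. */
static void flush_backlog(size_t done)
{
	printf("flush after %zu bytes\n", done);
}

static void maybe_flush(size_t done, size_t *flushed_at)
{
	if (done - *flushed_at < SZ_128K)
		return;			/* budget not spent yet */
	*flushed_at = done;
	flush_backlog(done);
}

int main(void)
{
	size_t done = 0, flushed_at = 0;

	/* A 1MB read of 64 records flushes on every 8th record. */
	for (int rec = 0; rec < 64; rec++) {
		done += RECORD_SIZE;
		maybe_flush(done, &flushed_at);
	}
	return 0;
}

Under this model the lock hold is broken up roughly every eight records, which matches the commit's aim of picking a period large enough to leave the max Bps rate visibly untouched.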