author     Jakub Kicinski <jakub.kicinski@netronome.com>   2019-09-07 07:29:59 +0200
committer  David S. Miller <davem@davemloft.net>           2019-09-07 18:10:34 +0200
commit     e7b159a48ba6f7c243881c7ef3afa6e8785c0826 (patch)
tree       82dd0d59e0d2de03e3da81eb237131657684cdce /net/tls
parent     net/tls: use RCU for the adder to the offload record list (diff)
download   linux-e7b159a48ba6f7c243881c7ef3afa6e8785c0826.tar.xz
           linux-e7b159a48ba6f7c243881c7ef3afa6e8785c0826.zip
net/tls: remove the record tail optimization
For TLS device offload the tag/message authentication code is filled in by the device; the kernel merely reserves space for it. Because the device overwrites it, the contents of the tag do not matter. Current code tries to save space by reusing the header as the tag. This, however, leads to an additional frag being created and defeats buffer coalescing (which trickles all the way down to the drivers).

Remove this optimization and try to allocate the space for the tag in the usual way, leaving the memory uninitialized. If memory allocation fails, rewind the record pointer so that the already copied user data is used as the tag.

Note that the optimization was actually buggy: the tag for TLS 1.2 is 16 bytes, but the header is just 13, so the reuse may have looked past the end of the page.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
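To make the new behaviour concrete, here is a minimal, self-contained userspace sketch of the return convention that tls_device_record_close() adopts in the patch below. It is not part of the patch; names such as record_close(), TAG_SIZE, OVERHEAD_SIZE and struct fake_record are invented for illustration. A return of 0 means the tag was placed in freshly allocated memory, a positive value is the number of tag bytes taken from already copied user data (which the caller then re-accounts), and a negative value means the record holds nothing but overhead, so there is no data to reuse.

/* Illustrative sketch only -- not from the patch. record_close() mirrors the
 * decision made by tls_device_record_close(): prefer a fresh allocation for
 * the tag, otherwise fall back to reusing the tail of the user data.
 */
#include <stdbool.h>
#include <stdio.h>

#define TAG_SIZE       16   /* TLS 1.2 AES-GCM tag (hypothetical constant) */
#define OVERHEAD_SIZE  29   /* 13-byte header + 16-byte tag (hypothetical) */

struct fake_record {
	int len;            /* bytes queued so far, header included */
};

static int record_close(const struct fake_record *rec, bool alloc_ok)
{
	if (alloc_ok)
		return 0;              /* tag appended from a new fragment */
	if (rec->len <= OVERHEAD_SIZE)
		return -1;             /* nothing but overhead; give up */
	return TAG_SIZE;               /* trailing user data serves as tag */
}

int main(void)
{
	struct fake_record rec = { .len = 100 };
	int rc = record_close(&rec, false);    /* simulate allocation failure */

	if (rc > 0)
		printf("re-account %d bytes of data as the tag\n", rc);
	else if (rc < 0)
		printf("destroy the open record and retry later\n");
	else
		printf("tag sits in a freshly refilled page fragment\n");
	return 0;
}

In the patch itself the caller handles these cases the same way: a positive return is added to the accounted size, and a negative return destroys the open record before bailing out.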
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/tls_device.c  67
1 file changed, 47 insertions, 20 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b11355e00514..916c3c0a99f0 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -256,29 +256,13 @@ static int tls_push_record(struct sock *sk,
struct tls_context *ctx,
struct tls_offload_context_tx *offload_ctx,
struct tls_record_info *record,
- struct page_frag *pfrag,
- int flags,
- unsigned char record_type)
+ int flags)
{
struct tls_prot_info *prot = &ctx->prot_info;
struct tcp_sock *tp = tcp_sk(sk);
- struct page_frag dummy_tag_frag;
skb_frag_t *frag;
int i;
- /* fill prepend */
- frag = &record->frags[0];
- tls_fill_prepend(ctx,
- skb_frag_address(frag),
- record->len - prot->prepend_size,
- record_type,
- prot->version);
-
- /* HW doesn't care about the data in the tag, because it fills it. */
- dummy_tag_frag.page = skb_frag_page(frag);
- dummy_tag_frag.offset = 0;
-
- tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
record->end_seq = tp->write_seq + record->len;
list_add_tail_rcu(&record->list, &offload_ctx->records_list);
offload_ctx->open_record = NULL;
@@ -302,6 +286,38 @@ static int tls_push_record(struct sock *sk,
return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}
+static int tls_device_record_close(struct sock *sk,
+ struct tls_context *ctx,
+ struct tls_record_info *record,
+ struct page_frag *pfrag,
+ unsigned char record_type)
+{
+ struct tls_prot_info *prot = &ctx->prot_info;
+ int ret;
+
+ /* append tag
+ * device will fill in the tag, we just need to append a placeholder
+ * use socket memory to improve coalescing (re-using a single buffer
+ * increases frag count)
+ * if we can't allocate memory now, steal some back from data
+ */
+ if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
+ sk->sk_allocation))) {
+ ret = 0;
+ tls_append_frag(record, pfrag, prot->tag_size);
+ } else {
+ ret = prot->tag_size;
+ if (record->len <= prot->overhead_size)
+ return -ENOMEM;
+ }
+
+ /* fill prepend */
+ tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
+ record->len - prot->overhead_size,
+ record_type, prot->version);
+ return ret;
+}
+
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
struct page_frag *pfrag,
size_t prepend_size)
@@ -452,13 +468,24 @@ last_record:
if (done || record->len >= max_open_record_len ||
(record->num_frags >= MAX_SKB_FRAGS - 1)) {
+ rc = tls_device_record_close(sk, tls_ctx, record,
+ pfrag, record_type);
+ if (rc) {
+ if (rc > 0) {
+ size += rc;
+ } else {
+ size = orig_size;
+ destroy_record(record);
+ ctx->open_record = NULL;
+ break;
+ }
+ }
+
rc = tls_push_record(sk,
tls_ctx,
ctx,
record,
- pfrag,
- tls_push_record_flags,
- record_type);
+ tls_push_record_flags);
if (rc < 0)
break;
}