Diffstat (limited to 'net/tls/tls_device.c')
-rw-r--r--  net/tls/tls_device.c  99
1 file changed, 60 insertions(+), 39 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 135a7ee9db03..e225c81e6b35 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
{
- if (ctx->tx_conf == TLS_HW)
+ if (ctx->tx_conf == TLS_HW) {
kfree(tls_offload_ctx_tx(ctx));
+ kfree(ctx->tx.rec_seq);
+ kfree(ctx->tx.iv);
+ }
if (ctx->rx_conf == TLS_HW)
kfree(tls_offload_ctx_rx(ctx));
@@ -86,22 +89,6 @@ static void tls_device_gc_task(struct work_struct *work)
}
}
-static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
- struct net_device *netdev)
-{
- if (sk->sk_destruct != tls_device_sk_destruct) {
- refcount_set(&ctx->refcount, 1);
- dev_hold(netdev);
- ctx->netdev = netdev;
- spin_lock_irq(&tls_device_lock);
- list_add_tail(&ctx->list, &tls_device_list);
- spin_unlock_irq(&tls_device_lock);
-
- ctx->sk_destruct = sk->sk_destruct;
- sk->sk_destruct = tls_device_sk_destruct;
- }
-}
-
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
unsigned long flags;
@@ -196,7 +183,7 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
* socket and no in-flight SKBs associated with this
* socket, so it is safe to free all the resources.
*/
-void tls_device_sk_destruct(struct sock *sk)
+static void tls_device_sk_destruct(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
@@ -214,7 +201,13 @@ void tls_device_sk_destruct(struct sock *sk)
if (refcount_dec_and_test(&tls_ctx->refcount))
tls_device_queue_ctx_destruction(tls_ctx);
}
-EXPORT_SYMBOL(tls_device_sk_destruct);
+
+void tls_device_free_resources_tx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_free_partial_record(sk, tls_ctx);
+}
static void tls_append_frag(struct tls_record_info *record,
struct page_frag *pfrag,
@@ -574,7 +567,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
rx_ctx = tls_offload_ctx_rx(tls_ctx);
resync_req = atomic64_read(&rx_ctx->resync_req);
- req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
+ req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
is_req_pending = resync_req;
if (unlikely(is_req_pending) && req_seq == seq &&
@@ -587,7 +580,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
struct strp_msg *rxm = strp_msg(skb);
- int err = 0, offset = rxm->offset, copy, nsg;
+ int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
struct sk_buff *skb_iter, *unused;
struct scatterlist sg[1];
char *orig_buf, *buf;
@@ -618,25 +611,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
else
err = 0;
- copy = min_t(int, skb_pagelen(skb) - offset,
- rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+ data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
+ if (skb_pagelen(skb) > offset) {
+ copy = min_t(int, skb_pagelen(skb) - offset, data_len);
- if (skb->decrypted)
- skb_store_bits(skb, offset, buf, copy);
+ if (skb->decrypted)
+ skb_store_bits(skb, offset, buf, copy);
- offset += copy;
- buf += copy;
+ offset += copy;
+ buf += copy;
+ }
+ pos = skb_pagelen(skb);
skb_walk_frags(skb, skb_iter) {
- copy = min_t(int, skb_iter->len,
- rxm->full_len - offset + rxm->offset -
- TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+ int frag_pos;
+
+ /* Practically all frags must belong to msg if reencrypt
+ * is needed with current strparser and coalescing logic,
+ * but strparser may "get optimized", so let's be safe.
+ */
+ if (pos + skb_iter->len <= offset)
+ goto done_with_frag;
+ if (pos >= data_len + rxm->offset)
+ break;
+
+ frag_pos = offset - pos;
+ copy = min_t(int, skb_iter->len - frag_pos,
+ data_len + rxm->offset - offset);
if (skb_iter->decrypted)
- skb_store_bits(skb_iter, offset, buf, copy);
+ skb_store_bits(skb_iter, frag_pos, buf, copy);
offset += copy;
buf += copy;
+done_with_frag:
+ pos += skb_iter->len;
}
free_buf:
@@ -672,6 +682,22 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
tls_device_reencrypt(sk, skb);
}
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+ struct net_device *netdev)
+{
+ if (sk->sk_destruct != tls_device_sk_destruct) {
+ refcount_set(&ctx->refcount, 1);
+ dev_hold(netdev);
+ ctx->netdev = netdev;
+ spin_lock_irq(&tls_device_lock);
+ list_add_tail(&ctx->list, &tls_device_list);
+ spin_unlock_irq(&tls_device_lock);
+
+ ctx->sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = tls_device_sk_destruct;
+ }
+}
+
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
u16 nonce_size, tag_size, iv_size, rec_seq_size;
@@ -855,8 +881,6 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
}
if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
- pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
- __func__, netdev->name);
rc = -ENOTSUPP;
goto release_netdev;
}
@@ -884,17 +908,16 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
&ctx->crypto_recv.info,
tcp_sk(sk)->copied_seq);
- if (rc) {
- pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
- __func__);
+ if (rc)
goto free_sw_resources;
- }
tls_device_attach(ctx, sk, netdev);
goto release_netdev;
free_sw_resources:
+ up_read(&device_offload_lock);
tls_sw_free_resources_rx(sk);
+ down_read(&device_offload_lock);
release_ctx:
ctx->priv_ctx_rx = NULL;
release_netdev:
@@ -929,8 +952,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
}
out:
up_read(&device_offload_lock);
- kfree(tls_ctx->rx.rec_seq);
- kfree(tls_ctx->rx.iv);
tls_sw_release_resources_rx(sk);
}
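
Note: below is a minimal user-space sketch of the offset bookkeeping that the reworked
tls_device_reencrypt() fragment walk in this diff performs. The struct frag layout,
copy_to_frags(), TAG_SIZE and the example values are illustrative only, not kernel API;
the real code operates on the sk_buff linear head and skb_walk_frags(), and additionally
checks skb->decrypted before writing each piece.

/* Sketch of the head + fragment copy logic from tls_device_reencrypt().
 * Compile with: cc -std=c99 sketch.c
 */
#include <stdio.h>
#include <string.h>

#define TAG_SIZE 16   /* stands in for TLS_CIPHER_AES_GCM_128_TAG_SIZE */

struct frag {
	char *data;
	int len;
};

/* Copy plaintext from buf into the "skb": first the linear head, then only
 * those frags that overlap [record_offset, record_offset + data_len).
 */
static void copy_to_frags(const char *buf, int record_offset, int full_len,
			  char *head, int head_len,
			  struct frag *frags, int nr_frags)
{
	int data_len = full_len - TAG_SIZE;	/* payload without the auth tag */
	int offset = record_offset;		/* absolute position in the skb */
	int pos;				/* absolute start of current frag */

	if (head_len > offset) {
		int copy = head_len - offset;

		if (copy > data_len)
			copy = data_len;
		memcpy(head + offset, buf, copy);
		offset += copy;
		buf += copy;
	}

	pos = head_len;
	for (int i = 0; i < nr_frags; i++) {
		int frag_pos, copy;

		/* Frag ends before the data still to be copied: skip it. */
		if (pos + frags[i].len <= offset)
			goto done_with_frag;
		/* Frag starts past the end of the record payload: stop. */
		if (pos >= data_len + record_offset)
			break;

		frag_pos = offset - pos;
		copy = frags[i].len - frag_pos;
		if (copy > data_len + record_offset - offset)
			copy = data_len + record_offset - offset;
		memcpy(frags[i].data + frag_pos, buf, copy);
		offset += copy;
		buf += copy;
done_with_frag:
		pos += frags[i].len;
	}
}

int main(void)
{
	char plaintext[] = "abcdefghijklmnopqrst";	/* 20 bytes of payload */
	char head[8] = { 0 }, f0[8] = { 0 }, f1[24] = { 0 };
	struct frag frags[] = { { f0, sizeof(f0) }, { f1, sizeof(f1) } };

	/* Record starts at offset 5 in the head; full_len includes the tag. */
	copy_to_frags(plaintext, 5, 20 + TAG_SIZE, head, sizeof(head), frags, 2);
	printf("head: %.3s  frag0: %.8s  frag1: %.9s\n", head + 5, f0, f1);
	return 0;
}

The point of the pos/frag_pos bookkeeping is that a fragment is skipped entirely when it
ends before the current copy offset, and the walk stops once pos reaches the end of the
record payload (data_len + rxm->offset in the kernel code), so the authentication tag at
the end of the record is never overwritten.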