path: root/net/tls
author     Jakub Kicinski <jakub.kicinski@netronome.com>  2019-06-11 06:40:00 +0200
committer  David S. Miller <davem@davemloft.net>          2019-06-11 21:22:26 +0200
commit     89fec474fa1ab2c754e48d29e1081a2c2bd22dc6 (patch)
tree       7c616c5ef6c27aaae2681c10cd852cc43253987d /net/tls
parent     net/tls: simplify seq calculation in handle_device_resync() (diff)
net/tls: pass record number as a byte array
TLS offload code casts the record number to a u64. The buffer should be aligned to 8 bytes, but it's actually a __be64, and the rest of the TLS code treats it as a big integer. Make the offload callbacks take a byte array; drivers can choose to do the ugly cast if they want to.

Prepare for copying the record number onto the stack by defining a constant for the maximum size of the byte array.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
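As an illustration only (not part of this patch): a driver that still wants a 64-bit counter after this change can convert the big-endian byte array explicitly, e.g. with get_unaligned_be64(), instead of relying on the alignment of the caller's buffer. The callback name below is hypothetical.

/* Hypothetical driver-side resync handler: the record number now arrives
 * as a byte array, so the driver does the conversion itself.
 */
#include <asm/unaligned.h>
#include <linux/netdevice.h>
#include <net/sock.h>

static void mydrv_tls_dev_resync_rx(struct net_device *netdev,
				    struct sock *sk, u32 seq, u8 *rcd_sn)
{
	/* Interpret the big-endian record number as a host-order u64. */
	u64 rcd_sn_hw = get_unaligned_be64(rcd_sn);

	/* ... program seq and rcd_sn_hw into the device's resync state ... */
}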
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/tls_device.c  12
-rw-r--r--  net/tls/tls_sw.c       8
2 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 59f0c8dacbcc..16635f0c829c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -551,7 +551,7 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
}
static void tls_device_resync_rx(struct tls_context *tls_ctx,
- struct sock *sk, u32 seq, u64 rcd_sn)
+ struct sock *sk, u32 seq, u8 *rcd_sn)
{
struct net_device *netdev;
@@ -563,7 +563,7 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
-void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
+void handle_device_resync(struct sock *sk, u32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
@@ -582,7 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
if (unlikely(is_req_pending) && req_seq == seq &&
atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
- tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+ tls_device_resync_rx(tls_ctx, sk, seq, tls_ctx->rx.rec_seq);
}
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
@@ -760,6 +760,12 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
goto free_offload_ctx;
}
+ /* Sanity-check the rec_seq_size for stack allocations */
+ if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
+ rc = -EINVAL;
+ goto free_offload_ctx;
+ }
+
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
prot->tag_size = tag_size;
prot->overhead_size = prot->prepend_size + prot->tag_size;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index bef71e54fad0..c1d22290f1d0 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2015,8 +2015,7 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
#ifdef CONFIG_TLS_DEVICE
- handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
- *(u64*)tls_ctx->rx.rec_seq);
+ handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset);
#endif
return data_len + TLS_HEADER_SIZE;
@@ -2283,8 +2282,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_priv;
}
- /* Sanity-check the IV size for stack allocations. */
- if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
+ /* Sanity-check the sizes for stack allocations. */
+ if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
+ rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
rc = -EINVAL;
goto free_priv;
}
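The companion change outside net/tls is not shown here, since the diffstat is limited to 'net/tls'. Presumably it adds the new size constant and switches the tlsdev_ops resync callback in include/net/tls.h to the byte-array form, roughly as sketched below; treat the exact layout and the value 8 as assumptions rather than part of this diff.

/* Sketch of the assumed include/net/tls.h side of this patch. */
#define TLS_MAX_REC_SEQ_SIZE	8	/* assumed bound checked against rec_seq_size */

struct tlsdev_ops {
	/* ... tls_dev_add / tls_dev_del callbacks ... */
	void (*tls_dev_resync_rx)(struct net_device *netdev, struct sock *sk,
				  u32 seq, u8 *rcd_sn);
};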