author		John Fastabend <john.fastabend@gmail.com>	2019-07-19 19:29:18 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>		2019-07-22 16:04:17 +0200
commit		32857cf57f920cdc03b5095f08febec94cf9c36b (patch)
tree		1ac174d1b05d6d1a3644c6cc5defbe6c6e41345b /net/tls/tls_main.c
parent		net/tls: remove sock unlock/lock around strp_done() (diff)
net/tls: fix transition through disconnect with close
It is possible (via shutdown()) for TCP sockets to go through the TCP_CLOSE
state via tcp_disconnect() without ever calling tcp_close(), which is what
would normally invoke the TLS close callback. Because of this, a user can
disconnect a socket and then put it in the LISTEN state, which breaks our
assumption that such sockets are always in the ESTABLISHED state.
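
As an illustration, a hypothetical userspace sequence along the lines of the
description above might look like the sketch below (the peer address, port,
and error handling are placeholders; this is not the syzbot reproducer):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* from the kernel UAPI (linux/tcp.h) */
#endif

int main(void)
{
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port   = htons(4433),			/* placeholder peer */
		.sin_addr   = { htonl(INADDR_LOOPBACK) },
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	/* The TLS ULP can only be attached to an established TCP socket. */
	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));

	/* shutdown() can take the socket through tcp_disconnect() into
	 * TCP_CLOSE without tcp_close(), i.e. without ever running the
	 * TLS close callback ...
	 */
	shutdown(fd, SHUT_RDWR);

	/* ... after which the socket can be moved to TCP_LISTEN, breaking
	 * the assumption that TLS sockets stay ESTABLISHED.
	 */
	listen(fd, 1);

	close(fd);
	return 0;
}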
More directly: because close() can call unhash(), and unhash() is
implemented by sockmap, a sockmap socket with TLS enabled can incorrectly
destroy the psock from unhash() and then call its close handler again. But
because the psock (the sockmap representation of the socket) has already
been destroyed, we call the close handler found in sk->sk_prot. However, in
some cases (the TLS BASE/BASE case) this still points at the sockmap close
handler, resulting in a circular call and the crash reported by syzbot.
To fix both of the above issues, implement the unhash() routine for TLS.
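
In outline, the new callback tears down the TLS state, hands off to the
protocol's original unhash, and defers freeing the context to a workqueue.
This is only a simplified sketch; the complete tls_sk_proto_unhash(),
including the !icsk_ulp_data fallback, is in the diff below:

/* Simplified sketch of the callback added by this patch; see the full
 * tls_sk_proto_unhash() in the diff below.
 */
static void tls_sk_proto_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* Quiesce the TX/RX paths before unhooking from the socket. */
	tls_sk_proto_cleanup(sk, ctx, sock_sndtimeo(sk, 0));
	inet_csk(sk)->icsk_ulp_data = NULL;

	/* Chain to the original (pre-TLS) unhash, e.g. TCP or sockmap. */
	if (ctx->sk_proto->unhash)
		ctx->sk_proto->unhash(sk);

	/* Free the context from a workqueue to avoid use-after-free. */
	tls_ctx_free_wq(ctx);
}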
v4:
- add note about tls offload still needing the fix;
- move sk_proto to the cold cache line;
- split TX context free into "release" and "free",
otherwise the GC work itself is in already freed
memory;
- move TX before RX for consistency;
- reuse tls_ctx_free();
- schedule the GC work after we're done with context
to avoid UAF;
- don't set the unhash in all modes, all modes "inherit"
TLS_BASE's callbacks anyway;
- disable the unhash hook for TLS_HW.
Fixes: 3c4d7559159bf ("tls: kernel TLS support")
Reported-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'net/tls/tls_main.c')
-rw-r--r-- | net/tls/tls_main.c | 55
1 file changed, 55 insertions, 0 deletions
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index d152a00a7a27..48f1c26459d0 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -261,6 +261,33 @@ void tls_ctx_free(struct tls_context *ctx)
 	kfree(ctx);
 }
 
+static void tls_ctx_free_deferred(struct work_struct *gc)
+{
+	struct tls_context *ctx = container_of(gc, struct tls_context, gc);
+
+	/* Ensure any remaining work items are completed. The sk will
+	 * already have lost its tls_ctx reference by the time we get
+	 * here so no xmit operation will actually be performed.
+	 */
+	if (ctx->tx_conf == TLS_SW) {
+		tls_sw_cancel_work_tx(ctx);
+		tls_sw_free_ctx_tx(ctx);
+	}
+
+	if (ctx->rx_conf == TLS_SW) {
+		tls_sw_strparser_done(ctx);
+		tls_sw_free_ctx_rx(ctx);
+	}
+
+	tls_ctx_free(ctx);
+}
+
+static void tls_ctx_free_wq(struct tls_context *ctx)
+{
+	INIT_WORK(&ctx->gc, tls_ctx_free_deferred);
+	schedule_work(&ctx->gc);
+}
+
 static void tls_sk_proto_cleanup(struct sock *sk,
 				 struct tls_context *ctx, long timeo)
 {
@@ -288,6 +315,26 @@ static void tls_sk_proto_cleanup(struct sock *sk,
 #endif
 }
 
+static void tls_sk_proto_unhash(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	long timeo = sock_sndtimeo(sk, 0);
+	struct tls_context *ctx;
+
+	if (unlikely(!icsk->icsk_ulp_data)) {
+		if (sk->sk_prot->unhash)
+			sk->sk_prot->unhash(sk);
+	}
+
+	ctx = tls_get_ctx(sk);
+	tls_sk_proto_cleanup(sk, ctx, timeo);
+	icsk->icsk_ulp_data = NULL;
+
+	if (ctx->sk_proto->unhash)
+		ctx->sk_proto->unhash(sk);
+	tls_ctx_free_wq(ctx);
+}
+
 static void tls_sk_proto_close(struct sock *sk, long timeout)
 {
 	void (*sk_proto_close)(struct sock *sk, long timeout);
@@ -305,6 +352,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
 		tls_sk_proto_cleanup(sk, ctx, timeo);
 
+	sk->sk_prot = ctx->sk_proto;
 	release_sock(sk);
 	if (ctx->tx_conf == TLS_SW)
 		tls_sw_free_ctx_tx(ctx);
@@ -608,6 +656,7 @@ static struct tls_context *create_ctx(struct sock *sk)
 	ctx->setsockopt = sk->sk_prot->setsockopt;
 	ctx->getsockopt = sk->sk_prot->getsockopt;
 	ctx->sk_proto_close = sk->sk_prot->close;
+	ctx->unhash = sk->sk_prot->unhash;
 	return ctx;
 }
 
@@ -731,6 +780,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
 	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
 	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;
+	prot[TLS_BASE][TLS_BASE].unhash = tls_sk_proto_unhash;
 
 	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
 	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
@@ -748,16 +798,20 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 
 #ifdef CONFIG_TLS_DEVICE
 	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
+	prot[TLS_HW][TLS_BASE].unhash = base->unhash;
 	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
 	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;
 
 	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
+	prot[TLS_HW][TLS_SW].unhash = base->unhash;
 	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
 	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
 
 	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
+	prot[TLS_BASE][TLS_HW].unhash = base->unhash;
 
 	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];
+	prot[TLS_SW][TLS_HW].unhash = base->unhash;
 
 	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
 #endif
@@ -794,6 +848,7 @@ static int tls_init(struct sock *sk)
 	tls_build_proto(sk);
 	ctx->tx_conf = TLS_BASE;
 	ctx->rx_conf = TLS_BASE;
+	ctx->sk_proto = sk->sk_prot;
 	update_sk_prot(sk, ctx);
 out:
 	return rc;