author    David Howells <dhowells@redhat.com>    2020-01-30 22:50:36 +0100
committer David Howells <dhowells@redhat.com>    2020-01-30 22:50:41 +0100
commit    04d36d748fac349b068ef621611f454010054c58 (patch)
tree      77b18f2ab733cda5c199ea289d3008df7726fa46  /net/rxrpc/peer_event.c
parent    rxrpc: Fix insufficient receive notification generation (diff)
download  linux-04d36d748fac349b068ef621611f454010054c58.tar.xz
          linux-04d36d748fac349b068ef621611f454010054c58.zip
rxrpc: Fix missing active use pinning of rxrpc_local object
The introduction of a split between the reference count on rxrpc_local
objects and the usage count didn't quite go far enough. A number of kernel
work items need to make use of the socket to perform transmission. These
also need to get an active count on the local object to prevent the socket
from being closed.

Fix this by getting the active count in those places.

Also split out the raw active count get/put functions as these places tend
to hold refs on the rxrpc_local object already, so getting and putting an
extra object ref is just a waste of time.

The problem can lead to symptoms like:

    BUG: kernel NULL pointer dereference, address: 0000000000000018
    ..
    CPU: 2 PID: 818 Comm: kworker/u9:0 Not tainted 5.5.0-fscache+ #51
    ...
    RIP: 0010:selinux_socket_sendmsg+0x5/0x13
    ...
    Call Trace:
     security_socket_sendmsg+0x2c/0x3e
     sock_sendmsg+0x1a/0x46
     rxrpc_send_keepalive+0x131/0x1ae
     rxrpc_peer_keepalive_worker+0x219/0x34b
     process_one_work+0x18e/0x271
     worker_thread+0x1a3/0x247
     kthread+0xe6/0xeb
     ret_from_fork+0x1f/0x30

Fixes: 730c5fd42c1e ("rxrpc: Fix local endpoint refcounting")
Signed-off-by: David Howells <dhowells@redhat.com>
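The shape of the fix is to bracket any socket use in a work item with an
active-count pin on the local endpoint. A minimal sketch of that pattern
follows (illustrative only; example_work_item is a hypothetical name, and
the real call sites are the rxrpc work items touched by this patch, such
as rxrpc_peer_keepalive_dispatch() in the diff below):

    /* Minimal sketch of the pinning pattern this patch applies.  The
     * function name is hypothetical; the real call sites are rxrpc
     * work items such as rxrpc_peer_keepalive_dispatch().
     */
    static void example_work_item(struct rxrpc_peer *peer)
    {
            /* Pin the local endpoint's active count; this fails if the
             * endpoint is already shutting down, in which case the
             * socket must not be used for transmission.
             */
            if (__rxrpc_use_local(peer->local)) {
                    rxrpc_send_keepalive(peer);     /* transmit on the socket */
                    rxrpc_unuse_local(peer->local); /* unpin when done */
            }
    }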
Diffstat (limited to 'net/rxrpc/peer_event.c')
-rw-r--r--  net/rxrpc/peer_event.c  42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 48f67a9b1037..923b263c401b 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -364,27 +364,31 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
                if (!rxrpc_get_peer_maybe(peer))
                        continue;

-               spin_unlock_bh(&rxnet->peer_hash_lock);
-
-               keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
-               slot = keepalive_at - base;
-               _debug("%02x peer %u t=%d {%pISp}",
-                      cursor, peer->debug_id, slot, &peer->srx.transport);
+               if (__rxrpc_use_local(peer->local)) {
+                       spin_unlock_bh(&rxnet->peer_hash_lock);
+
+                       keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+                       slot = keepalive_at - base;
+                       _debug("%02x peer %u t=%d {%pISp}",
+                              cursor, peer->debug_id, slot, &peer->srx.transport);
+
+                       if (keepalive_at <= base ||
+                           keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                               rxrpc_send_keepalive(peer);
+                               slot = RXRPC_KEEPALIVE_TIME;
+                       }

-               if (keepalive_at <= base ||
-                   keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
-                       rxrpc_send_keepalive(peer);
-                       slot = RXRPC_KEEPALIVE_TIME;
+                       /* A transmission to this peer occurred since last we
+                        * examined it so put it into the appropriate future
+                        * bucket.
+                        */
+                       slot += cursor;
+                       slot &= mask;
+                       spin_lock_bh(&rxnet->peer_hash_lock);
+                       list_add_tail(&peer->keepalive_link,
+                                     &rxnet->peer_keepalive[slot & mask]);
+                       rxrpc_unuse_local(peer->local);
                }
-
-               /* A transmission to this peer occurred since last we examined
-                * it so put it into the appropriate future bucket.
-                */
-               slot += cursor;
-               slot &= mask;
-               spin_lock_bh(&rxnet->peer_hash_lock);
-               list_add_tail(&peer->keepalive_link,
-                             &rxnet->peer_keepalive[slot & mask]);
                rxrpc_put_peer_locked(peer);
        }
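The commit message also mentions splitting out raw active-count get/put
functions for callers that already hold a ref on the rxrpc_local object.
A plausible shape for those helpers is sketched below, under the
assumption that struct rxrpc_local tracks its active users in an atomic_t
field named active_users; the actual definitions are not part of this
diff, so treat this as illustrative:

    /* Sketch of the raw active-count helpers; assumes struct rxrpc_local
     * has an atomic_t active_users field.  The exact definitions are not
     * shown in this diff.
     */
    static inline bool __rxrpc_use_local(struct rxrpc_local *local)
    {
            /* Take an active-use count unless it has already hit zero
             * (i.e. the endpoint is being shut down).
             */
            return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
    }

    static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
    {
            /* Drop an active-use count; returns true if this was the
             * last active user, at which point the caller can shut the
             * endpoint down.
             */
            return atomic_dec_return(&local->active_users) == 0;
    }

Because these raw variants skip the object ref get/put, they suit call
sites like the keepalive dispatcher above, which already holds a peer
(and hence local) reference for the duration of the work item.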