author     David S. Miller <davem@davemloft.net>  2020-10-06 15:18:20 +0200
committer  David S. Miller <davem@davemloft.net>  2020-10-06 15:18:20 +0200
commit     d91dc434f2baa592e9793597421231174d57bbbf
tree       2c06c95ba2001e527d9823ba8bfc03a313bfa2b1 /net
parent     tcp: fix receive window update in tcp_add_backlog()
parent     rxrpc: Fix server keyring leak
Merge tag 'rxrpc-fixes-20201005' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs
David Howells says:
====================
rxrpc: Miscellaneous fixes
Here are some miscellaneous rxrpc fixes:
(1) Fix the xdr encoding of the contents read from an rxrpc key.
(2) Fix a BUG() for an unsupported encoding type.
(3) Fix missing _bh lock annotations.
(4) Fix acceptance handling for an incoming call that is encrypted (see the
    usage sketch below).
(5) The server token keyring isn't network namespaced - it belongs to the
server, so there's no need. Namespacing it means that request_key()
fails to find it.
(6) Fix a leak of the server keyring.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
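As part of fix (4), RXRPC_CMD_ACCEPT is gone and a userspace server instead
pre-charges the accept backlog, one preallocated call at a time, by sending a
zero-length RXRPC_CHARGE_ACCEPT control message paired with an
RXRPC_USER_CALL_ID. The sketch below shows what that usage looks like; it
assumes the matching uapi constants in <linux/rxrpc.h> (the uapi header change
is outside this 'net'-only diffstat) and a bound, listening AF_RXRPC server
socket, so treat it as illustrative rather than authoritative.

/* Sketch: charge one preallocated incoming call on a listening AF_RXRPC
 * server socket.  The user_call_ID tags the call that will eventually
 * arrive; recvmsg() later reports data for that ID directly, with no
 * per-call accept step.  Assumes RXRPC_USER_CALL_ID and RXRPC_CHARGE_ACCEPT
 * from <linux/rxrpc.h>.
 */
#include <linux/rxrpc.h>
#include <string.h>
#include <sys/socket.h>

static int charge_one_accept(int sock, unsigned long user_call_id)
{
	unsigned char control[CMSG_SPACE(sizeof(unsigned long)) +
			      CMSG_SPACE(0)];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(control, 0, sizeof(control));
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	/* Attach the user call ID that the preallocated call will carry. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));

	/* A zero-length RXRPC_CHARGE_ACCEPT turns this sendmsg() into a
	 * "charge the accept backlog" operation; no data may follow and the
	 * socket must be in the server-listening state.
	 */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_CHARGE_ACCEPT;
	cmsg->cmsg_len = CMSG_LEN(0);

	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}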
Diffstat (limited to 'net')
 net/rxrpc/ar-internal.h |   7
 net/rxrpc/call_accept.c | 263
 net/rxrpc/call_object.c |   5
 net/rxrpc/conn_event.c  |   8
 net/rxrpc/key.c         |  20
 net/rxrpc/recvmsg.c     |  36
 net/rxrpc/sendmsg.c     |  15
 7 files changed, 65 insertions(+), 289 deletions(-)
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 884cff7bb169..97aebb5d19db 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -518,7 +518,6 @@ enum rxrpc_call_state {
 	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
 	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
 	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
-	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
 	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
 	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
 	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
@@ -714,8 +713,8 @@ struct rxrpc_ack_summary {
 enum rxrpc_command {
 	RXRPC_CMD_SEND_DATA,		/* send data message */
 	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
-	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
 	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
+	RXRPC_CMD_CHARGE_ACCEPT,	/* [server] charge accept preallocation */
 };
 
 struct rxrpc_call_params {
@@ -755,9 +754,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
 					   struct rxrpc_sock *,
 					   struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
-				     rxrpc_notify_rx_t);
-int rxrpc_reject_call(struct rxrpc_sock *);
+int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
 
 /*
  * call_event.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index ef160566aa9a..8df1964db333 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -39,8 +39,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 				      unsigned int debug_id)
 {
 	const void *here = __builtin_return_address(0);
-	struct rxrpc_call *call;
+	struct rxrpc_call *call, *xcall;
 	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+	struct rb_node *parent, **pp;
 	int max, tmp;
 	unsigned int size = RXRPC_BACKLOG_MAX;
 	unsigned int head, tail, call_head, call_tail;
@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	}
 
 	/* Now it gets complicated, because calls get registered with the
-	 * socket here, particularly if a user ID is preassigned by the user.
+	 * socket here, with a user ID preassigned by the user.
 	 */
 	call = rxrpc_alloc_call(rx, gfp, debug_id);
 	if (!call)
@@ -107,34 +108,33 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 			 here, (const void *)user_call_ID);
 
 	write_lock(&rx->call_lock);
-	if (user_attach_call) {
-		struct rxrpc_call *xcall;
-		struct rb_node *parent, **pp;
-
-		/* Check the user ID isn't already in use */
-		pp = &rx->calls.rb_node;
-		parent = NULL;
-		while (*pp) {
-			parent = *pp;
-			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-			if (user_call_ID < xcall->user_call_ID)
-				pp = &(*pp)->rb_left;
-			else if (user_call_ID > xcall->user_call_ID)
-				pp = &(*pp)->rb_right;
-			else
-				goto id_in_use;
-		}
-		call->user_call_ID = user_call_ID;
-		call->notify_rx = notify_rx;
+
+	/* Check the user ID isn't already in use */
+	pp = &rx->calls.rb_node;
+	parent = NULL;
+	while (*pp) {
+		parent = *pp;
+		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+		if (user_call_ID < xcall->user_call_ID)
+			pp = &(*pp)->rb_left;
+		else if (user_call_ID > xcall->user_call_ID)
+			pp = &(*pp)->rb_right;
+		else
+			goto id_in_use;
+	}
+
+	call->user_call_ID = user_call_ID;
+	call->notify_rx = notify_rx;
+	if (user_attach_call) {
 		rxrpc_get_call(call, rxrpc_call_got_kernel);
 		user_attach_call(call, user_call_ID);
-		rxrpc_get_call(call, rxrpc_call_got_userid);
-		rb_link_node(&call->sock_node, parent, pp);
-		rb_insert_color(&call->sock_node, &rx->calls);
-		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
 	}
+	rxrpc_get_call(call, rxrpc_call_got_userid);
+	rb_link_node(&call->sock_node, parent, pp);
+	rb_insert_color(&call->sock_node, &rx->calls);
+	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+
 	list_add(&call->sock_link, &rx->sock_calls);
 
 	write_unlock(&rx->call_lock);
@@ -157,11 +157,8 @@ id_in_use:
 }
 
 /*
- * Preallocate sufficient service connections, calls and peers to cover the
- * entire backlog of a socket.  When a new call comes in, if we don't have
- * sufficient of each available, the call gets rejected as busy or ignored.
- *
- * The backlog is replenished when a connection is accepted or rejected.
+ * Allocate the preallocation buffers for incoming service calls.  These must
+ * be charged manually.
 */
 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 {
@@ -174,13 +171,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 		rx->backlog = b;
 	}
 
-	if (rx->discard_new_call)
-		return 0;
-
-	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
-					  atomic_inc_return(&rxrpc_debug_id)) == 0)
-		;
-
 	return 0;
 }
 
@@ -333,6 +323,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 	rxrpc_see_call(call);
 	call->conn = conn;
 	call->security = conn->security;
+	call->security_ix = conn->security_ix;
 	call->peer = rxrpc_get_peer(conn->params.peer);
 	call->cong_cwnd = call->peer->cong_cwnd;
 	return call;
@@ -402,8 +393,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
 	if (rx->notify_new_call)
 		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
-	else
-		sk_acceptq_added(&rx->sk);
 
 	spin_lock(&conn->state_lock);
 	switch (conn->state) {
@@ -415,12 +404,8 @@
 	case RXRPC_CONN_SERVICE:
 		write_lock(&call->state_lock);
-		if (call->state < RXRPC_CALL_COMPLETE) {
-			if (rx->discard_new_call)
-				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-			else
-				call->state = RXRPC_CALL_SERVER_ACCEPTING;
-		}
+		if (call->state < RXRPC_CALL_COMPLETE)
+			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
 		write_unlock(&call->state_lock);
 		break;
@@ -440,9 +425,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
 	rxrpc_send_ping(call, skb);
 
-	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
-		rxrpc_notify_socket(call);
-
 	/* We have to discard the prealloc queue's ref here and rely on a
 	 * combination of the RCU read lock and refs held either by the socket
 	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
@@ -460,187 +442,18 @@ no_call:
 }
 
 /*
- * handle acceptance of a call by userspace
- * - assign the user call ID to the call at the front of the queue
- * - called with the socket locked.
+ * Charge up socket with preallocated calls, attaching user call IDs.
 */
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
-				     unsigned long user_call_ID,
-				     rxrpc_notify_rx_t notify_rx)
-	__releases(&rx->sk.sk_lock.slock)
-	__acquires(call->user_mutex)
+int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
 {
-	struct rxrpc_call *call;
-	struct rb_node *parent, **pp;
-	int ret;
-
-	_enter(",%lx", user_call_ID);
-
-	ASSERT(!irqs_disabled());
-
-	write_lock(&rx->call_lock);
-
-	if (list_empty(&rx->to_be_accepted)) {
-		write_unlock(&rx->call_lock);
-		release_sock(&rx->sk);
-		kleave(" = -ENODATA [empty]");
-		return ERR_PTR(-ENODATA);
-	}
-
-	/* check the user ID isn't already in use */
-	pp = &rx->calls.rb_node;
-	parent = NULL;
-	while (*pp) {
-		parent = *pp;
-		call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-		if (user_call_ID < call->user_call_ID)
-			pp = &(*pp)->rb_left;
-		else if (user_call_ID > call->user_call_ID)
-			pp = &(*pp)->rb_right;
-		else
-			goto id_in_use;
-	}
-
-	/* Dequeue the first call and check it's still valid.  We gain
-	 * responsibility for the queue's reference.
-	 */
-	call = list_entry(rx->to_be_accepted.next,
-			  struct rxrpc_call, accept_link);
-	write_unlock(&rx->call_lock);
-
-	/* We need to gain the mutex from the interrupt handler without
-	 * upsetting lockdep, so we have to release it there and take it here.
-	 * We are, however, still holding the socket lock, so other accepts
-	 * must wait for us and no one can add the user ID behind our backs.
-	 */
-	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
-		release_sock(&rx->sk);
-		kleave(" = -ERESTARTSYS");
-		return ERR_PTR(-ERESTARTSYS);
-	}
-
-	write_lock(&rx->call_lock);
-	list_del_init(&call->accept_link);
-	sk_acceptq_removed(&rx->sk);
-	rxrpc_see_call(call);
-
-	/* Find the user ID insertion point. */
-	pp = &rx->calls.rb_node;
-	parent = NULL;
-	while (*pp) {
-		parent = *pp;
-		call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-		if (user_call_ID < call->user_call_ID)
-			pp = &(*pp)->rb_left;
-		else if (user_call_ID > call->user_call_ID)
-			pp = &(*pp)->rb_right;
-		else
-			BUG();
-	}
-
-	write_lock_bh(&call->state_lock);
-	switch (call->state) {
-	case RXRPC_CALL_SERVER_ACCEPTING:
-		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-		break;
-	case RXRPC_CALL_COMPLETE:
-		ret = call->error;
-		goto out_release;
-	default:
-		BUG();
-	}
-
-	/* formalise the acceptance */
-	call->notify_rx = notify_rx;
-	call->user_call_ID = user_call_ID;
-	rxrpc_get_call(call, rxrpc_call_got_userid);
-	rb_link_node(&call->sock_node, parent, pp);
-	rb_insert_color(&call->sock_node, &rx->calls);
-	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
-		BUG();
-
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	rxrpc_notify_socket(call);
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	release_sock(&rx->sk);
-	_leave(" = %p{%d}", call, call->debug_id);
-	return call;
-
-out_release:
-	_debug("release %p", call);
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	rxrpc_release_call(rx, call);
-	rxrpc_put_call(call, rxrpc_call_put);
-	goto out;
-
-id_in_use:
-	ret = -EBADSLT;
-	write_unlock(&rx->call_lock);
-out:
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	release_sock(&rx->sk);
-	_leave(" = %d", ret);
-	return ERR_PTR(ret);
-}
-
-/*
- * Handle rejection of a call by userspace
- * - reject the call at the front of the queue
- */
-int rxrpc_reject_call(struct rxrpc_sock *rx)
-{
-	struct rxrpc_call *call;
-	bool abort = false;
-	int ret;
-
-	_enter("");
-
-	ASSERT(!irqs_disabled());
-
-	write_lock(&rx->call_lock);
-
-	if (list_empty(&rx->to_be_accepted)) {
-		write_unlock(&rx->call_lock);
-		return -ENODATA;
-	}
-
-	/* Dequeue the first call and check it's still valid.  We gain
-	 * responsibility for the queue's reference.
-	 */
-	call = list_entry(rx->to_be_accepted.next,
-			  struct rxrpc_call, accept_link);
-	list_del_init(&call->accept_link);
-	sk_acceptq_removed(&rx->sk);
-	rxrpc_see_call(call);
+	struct rxrpc_backlog *b = rx->backlog;
 
-	write_lock_bh(&call->state_lock);
-	switch (call->state) {
-	case RXRPC_CALL_SERVER_ACCEPTING:
-		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
-		abort = true;
-		fallthrough;
-	case RXRPC_CALL_COMPLETE:
-		ret = call->error;
-		goto out_discard;
-	default:
-		BUG();
-	}
+	if (rx->sk.sk_state == RXRPC_CLOSE)
+		return -ESHUTDOWN;
 
-out_discard:
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	if (abort) {
-		rxrpc_send_abort_packet(call);
-		rxrpc_release_call(rx, call);
-		rxrpc_put_call(call, rxrpc_call_put);
-	}
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	_leave(" = %d", ret);
-	return ret;
+	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
+					  GFP_KERNEL,
+					  atomic_inc_return(&rxrpc_debug_id));
 }
 
 /*
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index a40fae013942..ed49769b459d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -23,7 +23,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
 	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
 	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
 	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
-	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
 	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
 	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
 	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
@@ -352,9 +351,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 	call->call_id		= sp->hdr.callNumber;
 	call->service_id	= sp->hdr.serviceId;
 	call->cid		= sp->hdr.cid;
-	call->state		= RXRPC_CALL_SERVER_ACCEPTING;
-	if (sp->hdr.securityIndex > 0)
-		call->state	= RXRPC_CALL_SERVER_SECURING;
+	call->state		= RXRPC_CALL_SERVER_SECURING;
 	call->cong_tstamp	= skb->tstamp;
 
 	/* Set the channel for this call.  We don't get channel_lock as we're
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 447f55ca6886..64ace2960ecc 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -269,7 +269,7 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
 	if (call) {
 		write_lock_bh(&call->state_lock);
 		if (call->state == RXRPC_CALL_SERVER_SECURING) {
-			call->state = RXRPC_CALL_SERVER_ACCEPTING;
+			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
 			rxrpc_notify_socket(call);
 		}
 		write_unlock_bh(&call->state_lock);
@@ -340,18 +340,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
 			return ret;
 
 		spin_lock(&conn->channel_lock);
-		spin_lock(&conn->state_lock);
+		spin_lock_bh(&conn->state_lock);
 
 		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
 			conn->state = RXRPC_CONN_SERVICE;
-			spin_unlock(&conn->state_lock);
+			spin_unlock_bh(&conn->state_lock);
 			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
 				rxrpc_call_is_secure(
 					rcu_dereference_protected(
 						conn->channels[loop].call,
 						lockdep_is_held(&conn->channel_lock)));
 		} else {
-			spin_unlock(&conn->state_lock);
+			spin_unlock_bh(&conn->state_lock);
 		}
 
 		spin_unlock(&conn->channel_lock);
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 94c3df392651..2e8bd3b97301 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -903,7 +903,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
 
 	_enter("");
 
-	if (optlen <= 0 || optlen > PAGE_SIZE - 1)
+	if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities)
 		return -EINVAL;
 
 	description = memdup_sockptr_nul(optval, optlen);
@@ -940,7 +940,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
 	if (IS_ERR(description))
 		return PTR_ERR(description);
 
-	key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL);
+	key = request_key(&key_type_keyring, description, NULL);
 	if (IS_ERR(key)) {
 		kfree(description);
 		_leave(" = %ld", PTR_ERR(key));
@@ -1072,7 +1072,7 @@ static long rxrpc_read(const struct key *key,
 
 		switch (token->security_index) {
 		case RXRPC_SECURITY_RXKAD:
-			toksize += 9 * 4;	/* viceid, kvno, key*2 + len, begin,
+			toksize += 8 * 4;	/* viceid, kvno, key*2, begin,
 						 * end, primary, tktlen */
 			toksize += RND(token->kad->ticket_len);
 			break;
@@ -1107,7 +1107,8 @@ static long rxrpc_read(const struct key *key,
 			break;
 
 		default: /* we have a ticket we can't encode */
-			BUG();
+			pr_err("Unsupported key token type (%u)\n",
+			       token->security_index);
 			continue;
 		}
 
@@ -1138,6 +1139,14 @@ static long rxrpc_read(const struct key *key,
 			memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3));	\
 		xdr += (_l + 3) >> 2;					\
 	} while(0)
+#define ENCODE_BYTES(l, s)						\
+	do {								\
+		u32 _l = (l);						\
+		memcpy(xdr, (s), _l);					\
+		if (_l & 3)						\
+			memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3));	\
+		xdr += (_l + 3) >> 2;					\
+	} while(0)
 #define ENCODE64(x)							\
 	do {								\
 		__be64 y = cpu_to_be64(x);				\
@@ -1165,7 +1174,7 @@ static long rxrpc_read(const struct key *key,
 		case RXRPC_SECURITY_RXKAD:
 			ENCODE(token->kad->vice_id);
 			ENCODE(token->kad->kvno);
-			ENCODE_DATA(8, token->kad->session_key);
+			ENCODE_BYTES(8, token->kad->session_key);
 			ENCODE(token->kad->start);
 			ENCODE(token->kad->expiry);
 			ENCODE(token->kad->primary_flag);
@@ -1215,7 +1224,6 @@ static long rxrpc_read(const struct key *key,
 			break;
 
 		default:
-			BUG();
 			break;
 		}
 
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index c4684dde1f16..2c842851d72e 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -179,37 +179,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
 }
 
 /*
- * Pass back notification of a new call.  The call is added to the
- * to-be-accepted list.  This means that the next call to be accepted might not
- * be the last call seen awaiting acceptance, but unless we leave this on the
- * front of the queue and block all other messages until someone gives us a
- * user_ID for it, there's not a lot we can do.
- */
-static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
-				  struct rxrpc_call *call,
-				  struct msghdr *msg, int flags)
-{
-	int tmp = 0, ret;
-
-	ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
-
-	if (ret == 0 && !(flags & MSG_PEEK)) {
-		_debug("to be accepted");
-		write_lock_bh(&rx->recvmsg_lock);
-		list_del_init(&call->recvmsg_link);
-		write_unlock_bh(&rx->recvmsg_lock);
-
-		rxrpc_get_call(call, rxrpc_call_got);
-		write_lock(&rx->call_lock);
-		list_add_tail(&call->accept_link, &rx->to_be_accepted);
-		write_unlock(&rx->call_lock);
-	}
-
-	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
-	return ret;
-}
-
-/*
  * End the packet reception phase.
  */
 static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
@@ -630,9 +599,6 @@ try_again:
 	}
 
 	switch (READ_ONCE(call->state)) {
-	case RXRPC_CALL_SERVER_ACCEPTING:
-		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
-		break;
 	case RXRPC_CALL_CLIENT_RECV_REPLY:
 	case RXRPC_CALL_SERVER_RECV_REQUEST:
 	case RXRPC_CALL_SERVER_ACK_REQUEST:
@@ -728,7 +694,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
 	       call->debug_id, rxrpc_call_states[call->state],
 	       iov_iter_count(iter), want_more);
 
-	ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
+	ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);
 
 	mutex_lock(&call->user_mutex);
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 0824e103d037..d27140c836cc 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -530,10 +530,10 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
 				return -EINVAL;
 			break;
 
-		case RXRPC_ACCEPT:
+		case RXRPC_CHARGE_ACCEPT:
 			if (p->command != RXRPC_CMD_SEND_DATA)
 				return -EINVAL;
-			p->command = RXRPC_CMD_ACCEPT;
+			p->command = RXRPC_CMD_CHARGE_ACCEPT;
 			if (len != 0)
 				return -EINVAL;
 			break;
@@ -659,16 +659,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	if (ret < 0)
 		goto error_release_sock;
 
-	if (p.command == RXRPC_CMD_ACCEPT) {
+	if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
 		ret = -EINVAL;
 		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
 			goto error_release_sock;
-		call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
-		/* The socket is now unlocked. */
-		if (IS_ERR(call))
-			return PTR_ERR(call);
-		ret = 0;
-		goto out_put_unlock;
+		ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
+		goto error_release_sock;
 	}
 
 	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
@@ -690,7 +686,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 		case RXRPC_CALL_CLIENT_AWAIT_CONN:
 		case RXRPC_CALL_SERVER_PREALLOC:
 		case RXRPC_CALL_SERVER_SECURING:
-		case RXRPC_CALL_SERVER_ACCEPTING:
 			rxrpc_put_call(call, rxrpc_call_put);
 			ret = -EBUSY;
 			goto error_release_sock;
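For background on fix (1): the old rxrpc_read() emitted the 8-octet rxkad
session key with ENCODE_DATA, which writes a 32-bit length word before the
padded opaque bytes; the fix switches to the new ENCODE_BYTES helper, which
emits only the padded bytes, and drops the extra word from toksize
(9 * 4 -> 8 * 4). The following is a standalone userspace mock-up of the two
layouts, not kernel code; the helper names emit_data/emit_bytes are made up
for illustration and only mirror the macros in net/rxrpc/key.c.

/* Mock-up of the two XDR emit helpers to show why the session key switched
 * from ENCODE_DATA to ENCODE_BYTES: the former writes a 32-bit length word
 * first, the latter writes only the padded octets.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

static uint32_t *emit_data(uint32_t *xdr, const void *s, uint32_t l)
{
	static const uint8_t zero[4];

	*xdr++ = htonl(l);	/* length word (ENCODE_DATA only) */
	memcpy(xdr, s, l);
	if (l & 3)
		memcpy((uint8_t *)xdr + l, zero, 4 - (l & 3));
	return xdr + ((l + 3) >> 2);
}

static uint32_t *emit_bytes(uint32_t *xdr, const void *s, uint32_t l)
{
	static const uint8_t zero[4];

	memcpy(xdr, s, l);	/* bare padded octets (ENCODE_BYTES) */
	if (l & 3)
		memcpy((uint8_t *)xdr + l, zero, 4 - (l & 3));
	return xdr + ((l + 3) >> 2);
}

int main(void)
{
	uint8_t session_key[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
	uint32_t buf[8], *end;

	end = emit_data(buf, session_key, sizeof(session_key));
	printf("ENCODE_DATA : %zu words\n", (size_t)(end - buf));	/* 3 words */

	end = emit_bytes(buf, session_key, sizeof(session_key));
	printf("ENCODE_BYTES: %zu words\n", (size_t)(end - buf));	/* 2 words */
	return 0;
}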