author | Anna Schumaker <anna.schumaker@oracle.com> | 2024-09-23 21:00:07 +0200
committer | Anna Schumaker <anna.schumaker@oracle.com> | 2024-09-23 21:00:07 +0200
commit | 8c04a6d6e07ce565928ea98ae8c534cac871af19 (patch)
tree | f01f93522af90b4dfad449a3238e2754a3efa48d /net
parent | Linux 6.11-rc7 (diff)
parent | xdrgen: Prevent reordering of encoder and decoder functions (diff)
download | linux-8c04a6d6e07ce565928ea98ae8c534cac871af19.tar.xz, linux-8c04a6d6e07ce565928ea98ae8c534cac871af19.zip
Merge tag 'nfsd-6.12' into linux-next-with-localio
NFSD 6.12 Release Notes
Notable features of this release include:
- Pre-requisites for automatically determining the RPC server thread
count
- Clean-up and preparation for supporting LOCALIO, which will be
merged via the NFS client tree
- Enhancements and fixes to NFSv4.2 COPY offload
- A new Python-based tool for generating kernel SunRPC XDR encoding
and decoding functions, added as an aid for prototyping features
in protocols based on the Linux kernel's SunRPC implementation.
As always I am grateful to the NFSD contributors, reviewers,
testers, and bug reporters who participated during this cycle.
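The xdrgen item above is easier to picture with a small example. The sketch below is purely illustrative and is not taken from this merge: it pairs a hypothetical two-field XDR definition with the style of hand-written encoder and decoder the kernel's SunRPC code uses today (built on the existing xdr_stream_encode_u32()/xdr_stream_decode_u32() helpers from include/linux/sunrpc/xdr.h), which is the kind of boilerplate a generator such as xdrgen is meant to emit. All type and function names here are invented.

```c
/* Hypothetical XDR input to a generator (RFC 4506 syntax):
 *
 *        struct example_args {
 *                unsigned int slot;
 *                unsigned int count;
 *        };
 */

#include <linux/sunrpc/xdr.h>

struct example_args {
        u32 slot;
        u32 count;
};

/* Encoder in the style used throughout net/sunrpc today. */
static bool example_encode_args(struct xdr_stream *xdr,
                                const struct example_args *args)
{
        if (xdr_stream_encode_u32(xdr, args->slot) < 0)
                return false;
        if (xdr_stream_encode_u32(xdr, args->count) < 0)
                return false;
        return true;
}

/* Matching decoder: each field is pulled off the stream in order. */
static bool example_decode_args(struct xdr_stream *xdr,
                                struct example_args *args)
{
        if (xdr_stream_decode_u32(xdr, &args->slot) < 0)
                return false;
        if (xdr_stream_decode_u32(xdr, &args->count) < 0)
                return false;
        return true;
}
```

Hand-maintaining pairs of functions like these for every protocol message is the prototyping burden the new tool is intended to remove.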
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/sunrpc.h | 4
-rw-r--r-- | net/sunrpc/svc.c | 130
-rw-r--r-- | net/sunrpc/svc_xprt.c | 9
-rw-r--r-- | net/sunrpc/svcauth.c | 1
-rw-r--r-- | net/sunrpc/svcsock.c | 1
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c | 18
6 files changed, 85 insertions, 78 deletions
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index d4a362c9e4b3..e3c6e3b63f0b 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -36,7 +36,11 @@ static inline int sock_is_loopback(struct sock *sk)
         return loopback;
 }
 
+struct svc_serv;
+struct svc_rqst;
 int rpc_clients_notifier_register(void);
 void rpc_clients_notifier_unregister(void);
 void auth_domain_cleanup(void);
+void svc_sock_update_bufs(struct svc_serv *serv);
+enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
 #endif /* _NET_SUNRPC_SUNRPC_H */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 88a59cfa5583..9aff845196ce 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,6 +32,7 @@
 #include <trace/events/sunrpc.h>
 
 #include "fail.h"
+#include "sunrpc.h"
 
 #define RPCDBG_FACILITY RPCDBG_SVCDSP
 
@@ -417,7 +418,7 @@ struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
         return &serv->sv_pools[pidx % serv->sv_nrpools];
 }
 
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
+static int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
 {
         int err;
 
@@ -429,7 +430,6 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
         svc_unregister(serv, net);
         return 0;
 }
-EXPORT_SYMBOL_GPL(svc_rpcb_setup);
 
 void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
 {
@@ -664,8 +664,21 @@ svc_release_buffer(struct svc_rqst *rqstp)
                 put_page(rqstp->rq_pages[i]);
 }
 
-struct svc_rqst *
-svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
+static void
+svc_rqst_free(struct svc_rqst *rqstp)
+{
+        folio_batch_release(&rqstp->rq_fbatch);
+        svc_release_buffer(rqstp);
+        if (rqstp->rq_scratch_page)
+                put_page(rqstp->rq_scratch_page);
+        kfree(rqstp->rq_resp);
+        kfree(rqstp->rq_argp);
+        kfree(rqstp->rq_auth_data);
+        kfree_rcu(rqstp, rq_rcu_head);
+}
+
+static struct svc_rqst *
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 {
         struct svc_rqst *rqstp;
 
@@ -693,27 +706,10 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
         if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
                 goto out_enomem;
 
-        return rqstp;
-out_enomem:
-        svc_rqst_free(rqstp);
-        return NULL;
-}
-EXPORT_SYMBOL_GPL(svc_rqst_alloc);
-
-static struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
-{
-        struct svc_rqst *rqstp;
-
-        rqstp = svc_rqst_alloc(serv, pool, node);
-        if (!rqstp)
-                return ERR_PTR(-ENOMEM);
+        rqstp->rq_err = -EAGAIN;        /* No error yet */
 
-        spin_lock_bh(&serv->sv_lock);
         serv->sv_nrthreads += 1;
-        spin_unlock_bh(&serv->sv_lock);
-
-        atomic_inc(&pool->sp_nrthreads);
+        pool->sp_nrthreads += 1;
 
         /* Protected by whatever lock the service uses when calling
          * svc_set_num_threads()
@@ -721,6 +717,10 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
         list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
 
         return rqstp;
+
+out_enomem:
+        svc_rqst_free(rqstp);
+        return NULL;
 }
 
 /**
@@ -768,31 +768,22 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
         struct svc_pool *pool;
         unsigned int i;
 
-retry:
         pool = target_pool;
 
-        if (pool != NULL) {
-                if (atomic_inc_not_zero(&pool->sp_nrthreads))
-                        goto found_pool;
-                return NULL;
-        } else {
+        if (!pool) {
                 for (i = 0; i < serv->sv_nrpools; i++) {
                         pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
-                        if (atomic_inc_not_zero(&pool->sp_nrthreads))
-                                goto found_pool;
+                        if (pool->sp_nrthreads)
+                                break;
                 }
-                return NULL;
         }
 
-found_pool:
-        set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
-        set_bit(SP_NEED_VICTIM, &pool->sp_flags);
-        if (!atomic_dec_and_test(&pool->sp_nrthreads))
+        if (pool && pool->sp_nrthreads) {
+                set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+                set_bit(SP_NEED_VICTIM, &pool->sp_flags);
                 return pool;
-        /* Nothing left in this pool any more */
-        clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
-        clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
-        goto retry;
+        }
+        return NULL;
 }
 
 static int
@@ -803,6 +794,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
         struct svc_pool *chosen_pool;
         unsigned int state = serv->sv_nrthreads-1;
         int node;
+        int err;
 
         do {
                 nrservs--;
@@ -810,8 +802,8 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
                 node = svc_pool_map_get_node(chosen_pool->sp_id);
 
                 rqstp = svc_prepare_thread(serv, chosen_pool, node);
-                if (IS_ERR(rqstp))
-                        return PTR_ERR(rqstp);
+                if (!rqstp)
+                        return -ENOMEM;
                 task = kthread_create_on_node(serv->sv_threadfn, rqstp,
                                               node, "%s", serv->sv_name);
                 if (IS_ERR(task)) {
@@ -825,6 +817,13 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 
                 svc_sock_update_bufs(serv);
                 wake_up_process(task);
+
+                wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN);
+                err = rqstp->rq_err;
+                if (err) {
+                        svc_exit_thread(rqstp);
+                        return err;
+                }
         } while (nrservs > 0);
         return 0;
 }
@@ -871,7 +870,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
         if (!pool)
                 nrservs -= serv->sv_nrthreads;
         else
-                nrservs -= atomic_read(&pool->sp_nrthreads);
+                nrservs -= pool->sp_nrthreads;
 
         if (nrservs > 0)
                 return svc_start_kthreads(serv, pool, nrservs);
@@ -933,25 +932,21 @@ void svc_rqst_release_pages(struct svc_rqst *rqstp)
         }
 }
 
-/*
- * Called from a server thread as it's exiting. Caller must hold the "service
- * mutex" for the service.
+/**
+ * svc_exit_thread - finalise the termination of a sunrpc server thread
+ * @rqstp: the svc_rqst which represents the thread.
+ *
+ * When a thread started with svc_new_thread() exits it must call
+ * svc_exit_thread() as its last act. This must be done with the
+ * service mutex held. Normally this is held by a DIFFERENT thread, the
+ * one that is calling svc_set_num_threads() and which will wait for
+ * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the
+ * thread exits for any reason other than svc_thread_should_stop()
+ * returning %true (which indicated that svc_set_num_threads() is
+ * waiting for it to exit), then it must take the service mutex itself,
+ * which can only safely be done using mutex_try_lock().
  */
 void
-svc_rqst_free(struct svc_rqst *rqstp)
-{
-        folio_batch_release(&rqstp->rq_fbatch);
-        svc_release_buffer(rqstp);
-        if (rqstp->rq_scratch_page)
-                put_page(rqstp->rq_scratch_page);
-        kfree(rqstp->rq_resp);
-        kfree(rqstp->rq_argp);
-        kfree(rqstp->rq_auth_data);
-        kfree_rcu(rqstp, rq_rcu_head);
-}
-EXPORT_SYMBOL_GPL(svc_rqst_free);
-
-void
 svc_exit_thread(struct svc_rqst *rqstp)
 {
         struct svc_serv *serv = rqstp->rq_server;
@@ -959,11 +954,8 @@ svc_exit_thread(struct svc_rqst *rqstp)
 
         list_del_rcu(&rqstp->rq_all);
 
-        atomic_dec(&pool->sp_nrthreads);
-
-        spin_lock_bh(&serv->sv_lock);
+        pool->sp_nrthreads -= 1;
         serv->sv_nrthreads -= 1;
-        spin_unlock_bh(&serv->sv_lock);
         svc_sock_update_bufs(serv);
 
         svc_rqst_free(rqstp);
@@ -1098,6 +1090,7 @@ static int __svc_register(struct net *net, const char *progname,
         return error;
 }
 
+static
 int svc_rpcbind_set_version(struct net *net,
                             const struct svc_program *progp,
                             u32 version, int family,
@@ -1108,7 +1101,6 @@ int svc_rpcbind_set_version(struct net *net,
                                 version, family, proto, port);
 
 }
-EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);
 
 int svc_generic_rpcbind_set(struct net *net,
                             const struct svc_program *progp,
@@ -1526,6 +1518,14 @@ err_system_err:
         goto sendit;
 }
 
+/*
+ * Drop request
+ */
+static void svc_drop(struct svc_rqst *rqstp)
+{
+        trace_svc_drop(rqstp);
+}
+
 /**
  * svc_process - Execute one RPC transaction
  * @rqstp: RPC transaction context
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index d3735ab3e6d1..53ebc719ff5a 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -905,15 +905,6 @@ void svc_recv(struct svc_rqst *rqstp)
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
-/*
- * Drop request
- */
-void svc_drop(struct svc_rqst *rqstp)
-{
-        trace_svc_drop(rqstp);
-}
-EXPORT_SYMBOL_GPL(svc_drop);
-
 /**
  * svc_send - Return reply to client
  * @rqstp: RPC transaction context
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 1619211f0960..93d9e949e265 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -98,7 +98,6 @@ enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp)
         rqstp->rq_authop = aops;
         return aops->accept(rqstp);
 }
-EXPORT_SYMBOL_GPL(svc_authenticate);
 
 /**
  * svc_set_client - Assign an appropriate 'auth_domain' as the client
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 6b3f01beb294..825ec5357691 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1378,7 +1378,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
                 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
         spin_unlock_bh(&serv->sv_lock);
 }
-EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
 
 /*
  * Initialize socket for RPC use and create svc_sock struct
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index f15750cacacf..c3fbf0779d4a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -339,7 +339,6 @@ static int svc_rdma_cma_handler(struct rdma_cm_id *cma_id,
                 svc_xprt_enqueue(xprt);
                 break;
         case RDMA_CM_EVENT_DISCONNECTED:
-        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                 svc_xprt_deferred_close(xprt);
                 break;
         default:
@@ -370,7 +369,7 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
         listen_id = svc_rdma_create_listen_id(net, sa, cma_xprt);
         if (IS_ERR(listen_id)) {
                 kfree(cma_xprt);
-                return (struct svc_xprt *)listen_id;
+                return ERR_CAST(listen_id);
         }
         cma_xprt->sc_cm_id = listen_id;
 
@@ -384,6 +383,16 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
         return &cma_xprt->sc_xprt;
 }
 
+static void svc_rdma_xprt_done(struct rpcrdma_notification *rn)
+{
+        struct svcxprt_rdma *rdma = container_of(rn, struct svcxprt_rdma,
+                                                 sc_rn);
+        struct rdma_cm_id *id = rdma->sc_cm_id;
+
+        trace_svcrdma_device_removal(id);
+        svc_xprt_close(&rdma->sc_xprt);
+}
+
 /*
  * This is the xpo_recvfrom function for listening endpoints. Its
  * purpose is to accept incoming connections. The CMA callback handler
@@ -425,6 +434,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
         dev = newxprt->sc_cm_id->device;
         newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
 
+        if (rpcrdma_rn_register(dev, &newxprt->sc_rn, svc_rdma_xprt_done))
+                goto errout;
+
         newxprt->sc_max_req_size = svcrdma_max_req_size;
         newxprt->sc_max_requests = svcrdma_max_requests;
         newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
@@ -580,6 +592,7 @@ static void __svc_rdma_free(struct work_struct *work)
 {
         struct svcxprt_rdma *rdma =
                 container_of(work, struct svcxprt_rdma, sc_work);
+        struct ib_device *device = rdma->sc_cm_id->device;
 
         /* This blocks until the Completion Queues are empty */
         if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -608,6 +621,7 @@ static void __svc_rdma_free(struct work_struct *work)
         /* Destroy the CM ID */
         rdma_destroy_id(rdma->sc_cm_id);
 
+        rpcrdma_rn_unregister(device, &rdma->sc_rn);
         kfree(rdma);
 }
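Behaviourally, the most significant piece of the net/sunrpc/svc.c changes above is the thread-startup handshake: svc_prepare_thread() now seeds rqstp->rq_err with -EAGAIN, and svc_start_kthreads() sleeps in wait_var_event() until the new kthread overwrites rq_err with its real startup status. The thread-side store and wake-up are not part of this diff; they land in the server thread functions elsewhere in the series. Below is a minimal kernel-style sketch of the pattern with invented demo_* names, assuming the spawned thread reports its status via wake_up_var(); it is an illustration, not code from this merge.

```c
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/wait_bit.h>

/* Hypothetical per-thread state; rq_err mirrors svc_rqst::rq_err above. */
struct demo_thread {
        int rq_err;
};

/* Spawned side: publish a startup status exactly once, then carry on. */
static int demo_threadfn(void *data)
{
        struct demo_thread *t = data;

        t->rq_err = 0;          /* or a real -errno if setup failed */
        smp_mb();               /* make the store visible before waking the spawner */
        wake_up_var(&t->rq_err);

        /* ... the normal service loop would run here ... */
        return 0;
}

/* Spawning side: mirrors the wait_var_event() added to svc_start_kthreads(). */
static int demo_start_thread(struct demo_thread *t)
{
        struct task_struct *task;

        t->rq_err = -EAGAIN;    /* sentinel: no status reported yet */
        task = kthread_run(demo_threadfn, t, "demo");
        if (IS_ERR(task))
                return PTR_ERR(task);

        wait_var_event(&t->rq_err, t->rq_err != -EAGAIN);
        return t->rq_err;
}
```

The -EAGAIN sentinel doubles as the "no status reported yet" marker, which is what lets svc_start_kthreads() distinguish a thread that has not yet run from one that failed during startup and must be torn down with svc_exit_thread().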