author    | Chuck Lever <chuck.lever@oracle.com> | 2023-07-09 17:45:16 +0200
committer | Chuck Lever <chuck.lever@oracle.com> | 2023-08-29 23:45:22 +0200
commit    | 35308e7f0fc3942edc87d9c6dc78c4a096428957 (patch)
tree      | 7602c16df0152088c0f6556b20b9ec03edcb6c83 /fs/nfsd
parent    | SUNRPC: Remove net/sunrpc/auth_gss/gss_krb5_seqnum.c (diff)
download  | linux-35308e7f0fc3942edc87d9c6dc78c4a096428957.tar.xz, linux-35308e7f0fc3942edc87d9c6dc78c4a096428957.zip
NFSD: Refactor nfsd_reply_cache_free_locked()
To reduce contention on the bucket locks, we must avoid calling
kfree() while each bucket lock is held.
Start by refactoring nfsd_reply_cache_free_locked() into a helper
that removes an entry from the bucket (and must therefore run under
the lock) and a second helper that frees the entry (which does not
need to hold the lock).
For readability, rename the helpers to nfsd_cacherep_<verb>.
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
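
The commit message describes a common pattern for shortening lock hold times: do only the unlink of the entry from the shared structure while the lock is held, and defer the memory release until after the lock is dropped. The following stand-alone C sketch is an editor-supplied illustration of that split, not code from the commit; the names (struct cache_bucket, struct cache_entry, entry_unlink_locked(), entry_free(), entry_remove()) are hypothetical user-space stand-ins for the nfsd structures.

/*
 * Sketch of the unlink-then-free split: the bucket lock protects only
 * the list manipulation, while the (potentially slow) free runs after
 * the lock has been released.  Hypothetical example, not kernel code.
 */
#include <pthread.h>
#include <stdlib.h>

struct cache_entry {
	struct cache_entry *next;
	void *payload;
};

struct cache_bucket {
	pthread_mutex_t lock;
	struct cache_entry *head;
};

/* Must be called with b->lock held: only detaches the entry. */
static void entry_unlink_locked(struct cache_bucket *b, struct cache_entry *e)
{
	struct cache_entry **pp;

	for (pp = &b->head; *pp; pp = &(*pp)->next) {
		if (*pp == e) {
			*pp = e->next;
			break;
		}
	}
}

/* May be called without the lock: releases memory only. */
static void entry_free(struct cache_entry *e)
{
	free(e->payload);
	free(e);
}

/* The lock covers only the unlink; the free happens after unlock. */
static void entry_remove(struct cache_bucket *b, struct cache_entry *e)
{
	pthread_mutex_lock(&b->lock);
	entry_unlink_locked(b, e);
	pthread_mutex_unlock(&b->lock);
	entry_free(e);
}

int main(void)
{
	struct cache_bucket b = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
	struct cache_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->payload = malloc(16);
	e->next = b.head;
	b.head = e;

	entry_remove(&b, e);
	return 0;
}

The patch below applies the same split to nfsd_reply_cache_free(): the bucket lock is held only across nfsd_cacherep_unlink_locked(), and nfsd_cacherep_free() runs afterwards.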
Diffstat (limited to 'fs/nfsd')
-rw-r--r-- | fs/nfsd/nfscache.c | 27 |
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index a8eda1c85829..3fde7f6e4ef8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -110,21 +110,33 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
 	return rp;
 }
 
+static void nfsd_cacherep_free(struct svc_cacherep *rp)
+{
+	if (rp->c_type == RC_REPLBUFF)
+		kfree(rp->c_replvec.iov_base);
+	kmem_cache_free(drc_slab, rp);
+}
+
 static void
-nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
-				struct nfsd_net *nn)
+nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
+			    struct svc_cacherep *rp)
 {
-	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
+	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
 		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
-		kfree(rp->c_replvec.iov_base);
-	}
 	if (rp->c_state != RC_UNUSED) {
 		rb_erase(&rp->c_node, &b->rb_head);
 		list_del(&rp->c_lru);
 		atomic_dec(&nn->num_drc_entries);
 		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
 	}
-	kmem_cache_free(drc_slab, rp);
+}
+
+static void
+nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+			     struct nfsd_net *nn)
+{
+	nfsd_cacherep_unlink_locked(nn, b, rp);
+	nfsd_cacherep_free(rp);
 }
 
 static void
@@ -132,8 +144,9 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
 			      struct nfsd_net *nn)
 {
 	spin_lock(&b->cache_lock);
-	nfsd_reply_cache_free_locked(b, rp, nn);
+	nfsd_cacherep_unlink_locked(nn, b, rp);
 	spin_unlock(&b->cache_lock);
+	nfsd_cacherep_free(rp);
 }
 
 int nfsd_drc_slab_create(void)