author      Christian Schoenebeck <linux_oss@crudebyte.com>    2022-07-15 23:33:56 +0200
committer   Dominique Martinet <asmadeus@codewreck.org>        2022-10-05 00:05:41 +0200
commit      60ece0833b6c2bc1465eb2803fec20b670e2ee93 (patch)
tree        544ee183389b5867645c4bd9f3183b865a0f745e /net/9p/client.c
parent      net/9p: add 'pooled_rbuffers' flag to struct p9_trans_module (diff)
net/9p: allocate appropriate reduced message buffers
So far 'msize' was simply used as the buffer size for all 9p message types,
which is far too much for most messages and slowed down performance
tremendously with large values of the user-configurable 'msize' option.
Let's stop this waste by using the new p9_msg_buf_size() function for
allocating more appropriate, smaller buffers according to what is
actually sent over the wire.
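As a rough illustration of the principle (not the kernel code): the standalone
C sketch below uses a hypothetical estimate_msg_size() as a stand-in for
p9_msg_buf_size(), which derives an upper bound from the message's format
template; the point is merely that allocating min(msize, per-message estimate)
avoids full-'msize' allocations for tiny messages such as Tclunk.

#include <stdio.h>
#include <stdlib.h>

#define P9_HDRSZ 7	/* size[4] type[1] tag[2] */

/* hypothetical stand-in for p9_msg_buf_size(): worst-case size per type */
static size_t estimate_msg_size(int type)
{
	switch (type) {
	case 120:	/* Tclunk: header + fid[4] */
		return P9_HDRSZ + 4;
	case 110:	/* Twalk: header + 2 fids + nwname + 16 short names
			 * (255 is only an illustrative per-name cap here) */
		return P9_HDRSZ + 4 + 4 + 2 + 16 * (2 + 255);
	default:	/* unknown: fall back to a generous default */
		return 8192;
	}
}

int main(void)
{
	size_t msize = 512 * 1024;	/* large negotiated msize */
	int type = 120;			/* Tclunk */

	/* allocate min(msize, per-message estimate) instead of always msize */
	size_t alloc = estimate_msg_size(type);
	if (alloc > msize)
		alloc = msize;

	char *buf = malloc(alloc);
	printf("msize=%zu but only %zu bytes allocated for this message\n",
	       msize, alloc);
	free(buf);
	return 0;
}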
Only exception: the RDMA transport is currently excluded from this message
size optimization, for its response buffers that is, because it would not
cope with reduced sizes there: its response buffers are pulled from a
shared pool. [1]
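The corresponding choice in p9_client_rpc() is visible in the diff below:
tsize is always 0 (auto-size), while rsize falls back to the full 'msize'
whenever the transport sets 'pooled_rbuffers'. A minimal standalone sketch of
that decision, with simplified stand-in structs in place of the real
p9_client/p9_trans_module:

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for struct p9_trans_module / struct p9_client */
struct trans_mod { bool pooled_rbuffers; };
struct client { unsigned int msize; struct trans_mod *trans_mod; };

/* mirrors the tsize/rsize choice made at the top of p9_client_rpc() */
static void pick_sizes(const struct client *c,
		       unsigned int *tsize, unsigned int *rsize)
{
	*tsize = 0;	/* request buffer: always auto-sized */
	/* response buffer: full msize only for pooled-buffer transports */
	*rsize = c->trans_mod->pooled_rbuffers ? c->msize : 0;
}

int main(void)
{
	struct trans_mod tcp  = { .pooled_rbuffers = false };
	struct trans_mod rdma = { .pooled_rbuffers = true };
	struct client c = { .msize = 512 * 1024, .trans_mod = &tcp };
	unsigned int tsize, rsize;

	pick_sizes(&c, &tsize, &rsize);
	printf("tcp:  tsize=%u rsize=%u\n", tsize, rsize);

	c.trans_mod = &rdma;
	pick_sizes(&c, &tsize, &rsize);
	printf("rdma: tsize=%u rsize=%u\n", tsize, rsize);
	return 0;
}

Running the sketch prints rsize=524288 (the full 512 KiB msize) for the
RDMA-like transport and rsize=0 (auto-sized) for the other one.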
Link: https://lore.kernel.org/all/Ys3jjg52EIyITPua@codewreck.org/ [1]
Link: https://lkml.kernel.org/r/3f51590535dc96ed0a165b8218c57639cfa5c36c.1657920926.git.linux_oss@crudebyte.com
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
Diffstat (limited to 'net/9p/client.c')
-rw-r--r--   net/9p/client.c   42
1 file changed, 35 insertions, 7 deletions
diff --git a/net/9p/client.c b/net/9p/client.c
index 0bd7e43e5c4f..aaa37b07e30a 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -255,19 +255,35 @@ static struct kmem_cache *p9_req_cache;
  * p9_tag_alloc - Allocate a new request.
  * @c: Client session.
  * @type: Transaction type.
- * @t_size: Buffer size for holding this request.
- * @r_size: Buffer size for holding server's reply on this request.
+ * @t_size: Buffer size for holding this request
+ * (automatic calculation by format template if 0).
+ * @r_size: Buffer size for holding server's reply on this request
+ * (automatic calculation by format template if 0).
+ * @fmt: Format template for assembling 9p request message
+ * (see p9pdu_vwritef).
+ * @ap: Variable arguments to be fed to passed format template
+ * (see p9pdu_vwritef).
  *
  * Context: Process context.
  * Return: Pointer to new request.
  */
 static struct p9_req_t *
-p9_tag_alloc(struct p9_client *c, int8_t type, uint t_size, uint r_size)
+p9_tag_alloc(struct p9_client *c, int8_t type, uint t_size, uint r_size,
+	     const char *fmt, va_list ap)
 {
 	struct p9_req_t *req = kmem_cache_alloc(p9_req_cache, GFP_NOFS);
-	int alloc_tsize = min(c->msize, t_size);
-	int alloc_rsize = min(c->msize, r_size);
+	int alloc_tsize;
+	int alloc_rsize;
 	int tag;
+	va_list apc;
+
+	va_copy(apc, ap);
+	alloc_tsize = min_t(size_t, c->msize,
+			    t_size ?: p9_msg_buf_size(c, type, fmt, apc));
+	va_end(apc);
+
+	alloc_rsize = min_t(size_t, c->msize,
+			    r_size ?: p9_msg_buf_size(c, type + 1, fmt, ap));
 
 	if (!req)
 		return ERR_PTR(-ENOMEM);
@@ -599,6 +615,7 @@ static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
 {
 	int err;
 	struct p9_req_t *req;
+	va_list apc;
 
 	p9_debug(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
@@ -610,7 +627,9 @@ static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
 	if (c->status == BeginDisconnect && type != P9_TCLUNK)
 		return ERR_PTR(-EIO);
 
-	req = p9_tag_alloc(c, type, t_size, r_size);
+	va_copy(apc, ap);
+	req = p9_tag_alloc(c, type, t_size, r_size, fmt, apc);
+	va_end(apc);
 	if (IS_ERR(req))
 		return req;
 
@@ -645,9 +664,18 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 	int sigpending, err;
 	unsigned long flags;
 	struct p9_req_t *req;
+	/* Passing zero for tsize/rsize to p9_client_prepare_req() tells it to
+	 * auto determine an appropriate (small) request/response size
+	 * according to actual message data being sent. Currently RDMA
+	 * transport is excluded from this response message size optimization,
+	 * as it would not cope with it, due to its pooled response buffers
+	 * (using an optimized request size for RDMA as well though).
+	 */
+	const uint tsize = 0;
+	const uint rsize = c->trans_mod->pooled_rbuffers ? c->msize : 0;
 
 	va_start(ap, fmt);
-	req = p9_client_prepare_req(c, type, c->msize, c->msize, fmt, ap);
+	req = p9_client_prepare_req(c, type, tsize, rsize, fmt, ap);
 	va_end(ap);
 	if (IS_ERR(req))
 		return req;