author    | Jens Axboe <axboe@kernel.dk> | 2019-03-14 23:30:06 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2019-03-18 17:44:44 +0100
commit    | fd6fab2cb78d3b6023c26ec53e0aa6f0b477d2f7
tree      | 6f30a949748bf44ad926b9331cfa27556e9715cd /fs
parent    | io_uring: fix poll races
io_uring: retry bulk slab allocs as single allocs
I've seen cases where a bulk alloc fails, since the bulk alloc API
is all-or-nothing: either we get the number of entries we asked
for, or the call returns 0 entries.
If a batch bulk alloc fails, retry with a "normal" kmem_cache_alloc()
and just use that instead of failing with -EAGAIN.
While in there, ensure we use GFP_KERNEL. Passing only __GFP_NOWARN
was an oversight in the original code from when we switched away
from GFP_ATOMIC.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/io_uring.c | 19 |
1 file changed, 14 insertions(+), 5 deletions(-)
```diff
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 12bb238aed6b..4c6a5e60ddbe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -399,13 +399,14 @@ static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 				   struct io_submit_state *state)
 {
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
 	if (!percpu_ref_tryget(&ctx->refs))
 		return NULL;
 
 	if (!state) {
-		req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
+		req = kmem_cache_alloc(req_cachep, gfp);
 		if (unlikely(!req))
 			goto out;
 	} else if (!state->free_reqs) {
@@ -413,10 +414,18 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 		int ret;
 
 		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
-		ret = kmem_cache_alloc_bulk(req_cachep, __GFP_NOWARN, sz,
-						state->reqs);
-		if (unlikely(ret <= 0))
-			goto out;
+		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
+
+		/*
+		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
+		 * retry single alloc to be on the safe side.
+		 */
+		if (unlikely(ret <= 0)) {
+			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+			if (!state->reqs[0])
+				goto out;
+			ret = 1;
+		}
 		state->free_reqs = ret - 1;
 		state->cur_req = 1;
 		req = state->reqs[0];
```
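For readers outside the kernel tree, below is a minimal, self-contained sketch of the same refill-then-fallback pattern the patch applies. The names (`struct demo_state`, `obj_cachep`, `demo_get_obj`, `BATCH_MAX`) are hypothetical stand-ins for io_uring's `io_submit_state` and `req_cachep`, not actual kernel identifiers, and the slab cache is assumed to be created elsewhere with `kmem_cache_create()`.

```c
#include <linux/kernel.h>
#include <linux/slab.h>

#define BATCH_MAX 8

/* Hypothetical per-submission batch, standing in for io_submit_state. */
struct demo_state {
	void *objs[BATCH_MAX];
	unsigned int free_objs;	/* cached objects not yet handed out */
	unsigned int cur;	/* index of the next object to hand out */
};

static struct kmem_cache *obj_cachep;	/* assumed created elsewhere */

/*
 * Hand out one object, refilling the batch first if it is empty.
 * kmem_cache_alloc_bulk() is all-or-nothing: it either fills all
 * 'sz' slots and returns sz, or it returns 0. On a failed batch,
 * fall back to a single kmem_cache_alloc() before giving up.
 */
static void *demo_get_obj(struct demo_state *state, unsigned int want)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;

	if (!state->free_objs) {
		size_t sz = min_t(size_t, want, ARRAY_SIZE(state->objs));
		int ret;

		ret = kmem_cache_alloc_bulk(obj_cachep, gfp, sz, state->objs);
		if (unlikely(ret <= 0)) {
			/* Batch failed: retry as a single alloc. */
			state->objs[0] = kmem_cache_alloc(obj_cachep, gfp);
			if (!state->objs[0])
				return NULL;	/* caller can map this to -EAGAIN */
			ret = 1;
		}
		state->free_objs = ret - 1;
		state->cur = 1;
		return state->objs[0];
	}

	state->free_objs--;
	return state->objs[state->cur++];
}
```

On the flag choice: passing `__GFP_NOWARN` alone, as the pre-patch code did, gives the allocator no reclaim flags at all, so it cannot sleep or reclaim memory and fails more easily. Adding `GFP_KERNEL` restores normal reclaim behavior, which is safe here because the allocation happens from process context at submission time.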