author	Jens Axboe <axboe@kernel.dk>	2019-11-07 18:57:36 +0100
committer	Jens Axboe <axboe@kernel.dk>	2019-11-07 19:57:17 +0100
commit	5f8fd2d3e0a7aa7fc9d97226be24286edd289835 (patch)
tree	4f2d850b8a843da6bdc86cff648a61a0b8ad8888
parent	io-wq: add support for bounded vs unbounded work (diff)
download	linux-5f8fd2d3e0a7aa7fc9d97226be24286edd289835.tar.xz
	linux-5f8fd2d3e0a7aa7fc9d97226be24286edd289835.zip
io_uring: properly mark async work as bounded vs unbounded
Now that io-wq supports separating the two request lifetime types, mark
the following IO as having unbounded runtimes:

- Any read/write to a non-regular file
- Any specific networked IO
- Any poll command

Signed-off-by: Jens Axboe <axboe@kernel.dk>
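The distinction matters because io-wq sizes its worker pools by work type: reads and writes against regular files complete in bounded time, while socket IO and poll can block indefinitely. Below is a minimal standalone sketch of the classification this patch applies inside io_prep_async_work(); the helper name is hypothetical, while the opcode and flag names are the kernel's:

static bool io_work_is_unbounded(int opcode, unsigned int req_flags)
{
	switch (opcode) {
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_SENDMSG:
	case IORING_OP_RECVMSG:
	case IORING_OP_ACCEPT:
	case IORING_OP_POLL_ADD:
		/*
		 * REQ_F_ISREG is only ever set for regular files, so the
		 * networked opcodes share the same check and always come
		 * out as unbounded.
		 */
		return !(req_flags & REQ_F_ISREG);
	default:
		/* everything else keeps a bounded runtime */
		return false;
	}
}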
-rw-r--r--	fs/io_uring.c	16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 831bea0fbc75..02a4f5e8a6e4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -506,6 +506,20 @@ static inline bool io_prep_async_work(struct io_kiocb *req)
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
 		do_hashed = true;
+		/* fall-through */
+	case IORING_OP_READV:
+	case IORING_OP_READ_FIXED:
+	case IORING_OP_SENDMSG:
+	case IORING_OP_RECVMSG:
+	case IORING_OP_ACCEPT:
+	case IORING_OP_POLL_ADD:
+		/*
+		 * We know REQ_F_ISREG is not set on some of these
+		 * opcodes, but this enables us to keep the check in
+		 * just one place.
+		 */
+		if (!(req->flags & REQ_F_ISREG))
+			req->work.flags |= IO_WQ_WORK_UNBOUND;
 		break;
 	}
 	if (io_sqe_needs_user(req->submit.sqe))
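The IO_WQ_WORK_UNBOUND flag set above is consumed on the io-wq side, which the parent commit split into two worker accounts per node. A simplified sketch of that dispatch, close to but not verbatim from fs/io-wq.c of the same series:

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	/* route each work item to the matching worker account */
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];
	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

Bounded work is served by the fixed-size pool chosen at creation time; unbounded work is accounted separately, so slow socket or poll work cannot starve regular-file IO.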
@@ -3745,7 +3759,7 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 	/* Do QD, or 4 * CPUS, whatever is smallest */
 	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-	ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, NULL);
+	ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user);
 	if (IS_ERR(ctx->io_wq)) {
 		ret = PTR_ERR(ctx->io_wq);
 		ctx->io_wq = NULL;
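Passing ctx->user instead of NULL lets io-wq charge unbounded workers to the submitting user. A sketch of the setup this enables, assumed and simplified from the io_wq_create() path of the same series:

	/* bounded workers: fixed cap chosen by the caller */
	wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	/* unbounded workers: cap at the user's process limit instead */
	if (wq->user)
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);

Without a user to account against there is no sensible cap on unbounded workers, which is why this call site only starts passing one now that some work is actually marked unbounded.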