author     Linus Torvalds <torvalds@linux-foundation.org>  2021-10-23 05:34:31 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-10-23 05:34:31 +0200
commit     da4d34b669723508601a4c29daa22cdc669ee005 (patch)
tree       06690a1c419a76c45c7951e4e84d0b41a4192d66 /fs
parent     Merge tag 'fuse-fixes-5.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent     io_uring: apply worker limits to previous users (diff)
Merge tag 'io_uring-5.15-2021-10-22' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
 "Two fixes for the max workers limit API that was introduced this
  series: one fix for an issue with that code, and one fixing a linked
  timeout regression in this series"

* tag 'io_uring-5.15-2021-10-22' of git://git.kernel.dk/linux-block:
  io_uring: apply worker limits to previous users
  io_uring: fix ltimeout unprep
  io_uring: apply max_workers limit to all future users
  io-wq: max_worker fixes
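For context on the API these fixes touch: userspace configures the per-task io-wq worker caps through the IORING_REGISTER_IOWQ_MAX_WORKERS opcode of io_uring_register(2), passing an array of two values (bounded workers first, unbounded second). The sketch below is illustrative only and not part of this merge; it assumes a 5.15+ kernel and headers that define IORING_REGISTER_IOWQ_MAX_WORKERS, and uses raw syscalls so it does not depend on liburing.

/* Hedged sketch: set the io-wq worker caps for the current task and
 * read back the previous values, which the kernel writes into the
 * same array. Error handling is kept minimal. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int register_iowq_limits(int ring_fd, unsigned int *vals)
{
	/* vals[0] = bounded workers, vals[1] = unbounded workers;
	 * a zero entry leaves that limit unchanged but still reports
	 * the previous value back. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_MAX_WORKERS, vals, 2);
}

int main(void)
{
	struct io_uring_params p = { 0 };
	unsigned int vals[2] = { 4, 16 };	/* new caps */
	int ring_fd;

	ring_fd = syscall(__NR_io_uring_setup, 4, &p);
	if (ring_fd < 0) {
		perror("io_uring_setup");
		return 1;
	}

	if (register_iowq_limits(ring_fd, vals) < 0)
		perror("IORING_REGISTER_IOWQ_MAX_WORKERS");
	else
		printf("previous caps: bound=%u unbound=%u\n",
		       vals[0], vals[1]);

	close(ring_fd);
	return 0;
}

Passing zero for an entry acts as a pure query of that cap, which is the behavior the io_wq_max_workers() hunk below preserves.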
Diffstat (limited to 'fs')
-rw-r--r--   fs/io-wq.c     7
-rw-r--r--   fs/io_uring.c  54
2 files changed, 47 insertions(+), 14 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 5bf8aa81715e..422a7ed6a9bd 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
pr_warn_once("io-wq is not configured for unbound workers");
raw_spin_lock(&wqe->lock);
- if (acct->nr_workers == acct->max_workers) {
+ if (acct->nr_workers >= acct->max_workers) {
raw_spin_unlock(&wqe->lock);
return true;
}
@@ -1291,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
rcu_read_lock();
for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
struct io_wqe_acct *acct;
+ raw_spin_lock(&wqe->lock);
for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- acct = &wq->wqes[node]->acct[i];
+ acct = &wqe->acct[i];
prev = max_t(int, acct->max_workers, prev);
if (new_count[i])
acct->max_workers = new_count[i];
new_count[i] = prev;
}
+ raw_spin_unlock(&wqe->lock);
}
rcu_read_unlock();
return 0;
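Note the semantics visible in this hunk: io_wq_max_workers() installs each non-zero entry of new_count[] as the new limit and always writes the previous limit back into the array, so a zeroed array works as a read-only query. A minimal liburing variant of that query, assuming a liburing release that ships io_uring_register_iowq_max_workers() (the helper is an assumption of this sketch, not something introduced by this merge):

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	unsigned int vals[2] = { 0, 0 };	/* zeros: query only */
	int ret;

	ret = io_uring_queue_init(4, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	/* With both entries zero, the limits are left untouched and the
	 * current (previous) caps are reported back in vals[]. */
	ret = io_uring_register_iowq_max_workers(&ring, vals);
	if (ret == 0)
		printf("current caps: bound=%u unbound=%u\n",
		       vals[0], vals[1]);

	io_uring_queue_exit(&ring);
	return 0;
}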
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e68d27829bb2..bc18af5e0a93 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -456,6 +456,8 @@ struct io_ring_ctx {
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
+ u32 iowq_limits[2];
+ bool iowq_limits_set;
};
};
@@ -1368,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req)
}
}
-static inline void io_unprep_linked_timeout(struct io_kiocb *req)
-{
- req->flags &= ~REQ_F_LINK_TIMEOUT;
-}
-
static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
if (WARN_ON_ONCE(!req->link))
@@ -6983,7 +6980,7 @@ issue_sqe:
switch (io_arm_poll_handler(req)) {
case IO_APOLL_READY:
if (linked_timeout)
- io_unprep_linked_timeout(req);
+ io_queue_linked_timeout(linked_timeout);
goto issue_sqe;
case IO_APOLL_ABORTED:
/*
@@ -9638,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
ret = io_uring_alloc_task_context(current, ctx);
if (unlikely(ret))
return ret;
+
tctx = current->io_uring;
+ if (ctx->iowq_limits_set) {
+ unsigned int limits[2] = { ctx->iowq_limits[0],
+ ctx->iowq_limits[1], };
+
+ ret = io_wq_max_workers(tctx->io_wq, limits);
+ if (ret)
+ return ret;
+ }
}
if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10643,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
void __user *arg)
+ __must_hold(&ctx->uring_lock)
{
+ struct io_tctx_node *node;
struct io_uring_task *tctx = NULL;
struct io_sq_data *sqd = NULL;
__u32 new_count[2];
@@ -10674,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
tctx = current->io_uring;
}
- ret = -EINVAL;
- if (!tctx || !tctx->io_wq)
- goto err;
+ BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
- ret = io_wq_max_workers(tctx->io_wq, new_count);
- if (ret)
- goto err;
+ memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+ ctx->iowq_limits_set = true;
+
+ ret = -EINVAL;
+ if (tctx && tctx->io_wq) {
+ ret = io_wq_max_workers(tctx->io_wq, new_count);
+ if (ret)
+ goto err;
+ } else {
+ memset(new_count, 0, sizeof(new_count));
+ }
if (sqd) {
mutex_unlock(&sqd->lock);
@@ -10690,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
if (copy_to_user(arg, new_count, sizeof(new_count)))
return -EFAULT;
+ /* that's it for SQPOLL, only the SQPOLL task creates requests */
+ if (sqd)
+ return 0;
+
+ /* now propagate the restriction to all registered users */
+ list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+ struct io_uring_task *tctx = node->task->io_uring;
+
+ if (WARN_ON_ONCE(!tctx->io_wq))
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(new_count); i++)
+ new_count[i] = ctx->iowq_limits[i];
+ /* ignore errors, it always returns zero anyway */
+ (void)io_wq_max_workers(tctx->io_wq, new_count);
+ }
return 0;
err:
if (sqd) {