author | Jens Axboe <axboe@kernel.dk> | 2019-11-13 06:31:31 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2019-11-13 19:37:54 +0100
commit | 7d7230652e7c788ef908536fd79f4cca077f269f
tree | 111768c5fef3d709f65a336ba42aaefd772c1efe /fs/io-wq.c
parent | io_uring: check for validity of ->rings in teardown
io_wq: add get/put_work handlers to io_wq_create()
For cancellation, we need to ensure that the work item stays valid for
as long as ->cur_work is valid. Right now we can't safely dereference
the work item even under the wqe->lock, because while the ->cur_work
pointer will remain valid, the work could be completing and be freed
in parallel.
Only invoke ->get/put_work() on work items we know the caller queued itself. Add IO_WQ_WORK_INTERNAL for io-wq's own use, which is needed when we're queueing a flush item, for instance.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
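Since this diffstat is limited to fs/io-wq.c, the caller's side of the new API is not shown. A minimal sketch of what handlers passed to io_wq_create() might look like, assuming the work item is embedded in a reference-counted request (the names io_get_work, io_put_work, io_kiocb, and io_put_req follow io_uring's conventions but are not part of this hunk):

```c
/*
 * Sketch of caller-supplied handlers (assumed names; the io_uring side
 * is outside this diffstat). The work item stays safely dereferenceable
 * through ->cur_work because the request that embeds it holds an extra
 * reference for the duration of its execution.
 */
static void io_get_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	refcount_inc(&req->refs);
}

static void io_put_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	/* drops the reference taken in io_get_work() */
	io_put_req(req);
}
```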
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r-- | fs/io-wq.c | 25
1 file changed, 23 insertions(+), 2 deletions(-)
```diff
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 33b14b85752b..26d81540c1fc 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -106,6 +106,9 @@ struct io_wq {
 	unsigned long state;
 	unsigned nr_wqes;
 
+	get_work_fn *get_work;
+	put_work_fn *put_work;
+
 	struct task_struct *manager;
 	struct user_struct *user;
 	struct mm_struct *mm;
@@ -392,7 +395,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
 static void io_worker_handle_work(struct io_worker *worker)
 	__releases(wqe->lock)
 {
-	struct io_wq_work *work, *old_work;
+	struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
 
@@ -424,6 +427,8 @@ static void io_worker_handle_work(struct io_worker *worker)
 			wqe->flags |= IO_WQE_FLAG_STALLED;
 
 		spin_unlock_irq(&wqe->lock);
+		if (put_work && wq->put_work)
+			wq->put_work(old_work);
 		if (!work)
 			break;
 next:
@@ -444,6 +449,11 @@ next:
 		if (worker->mm)
 			work->flags |= IO_WQ_WORK_HAS_MM;
 
+		if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
+			put_work = work;
+			wq->get_work(work);
+		}
+
 		old_work = work;
 		work->func(&work);
 
@@ -455,6 +465,12 @@ next:
 		}
 		if (work && work != old_work) {
 			spin_unlock_irq(&wqe->lock);
+
+			if (put_work && wq->put_work) {
+				wq->put_work(put_work);
+				put_work = NULL;
+			}
+
 			/* dependent work not hashed */
 			hash = -1U;
 			goto next;
@@ -950,13 +966,15 @@ void io_wq_flush(struct io_wq *wq)
 
 		init_completion(&data.done);
 		INIT_IO_WORK(&data.work, io_wq_flush_func);
+		data.work.flags |= IO_WQ_WORK_INTERNAL;
 		io_wqe_enqueue(wqe, &data.work);
 		wait_for_completion(&data.done);
 	}
 }
 
 struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
-			   struct user_struct *user)
+			   struct user_struct *user, get_work_fn *get_work,
+			   put_work_fn *put_work)
 {
 	int ret = -ENOMEM, i, node;
 	struct io_wq *wq;
@@ -972,6 +990,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	wq->get_work = get_work;
+	wq->put_work = put_work;
+
 	/* caller must already hold a reference to this */
 	wq->user = user;
```
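With the extended signature, the creator wires the handlers up once at setup time. A hedged usage sketch follows; the ctx fields and handler names are assumptions carried over from the sketch above, and only io_wq_create()'s parameter list comes from this patch:

```c
/*
 * Hypothetical caller: only the io_wq_create() signature is from this
 * patch; ctx->sqo_mm, ctx->user, and the handler names are assumptions.
 */
ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
			  io_get_work, io_put_work);
if (IS_ERR(ctx->io_wq))
	return PTR_ERR(ctx->io_wq);
```

Note that work flagged IO_WQ_WORK_INTERNAL (such as the flush item queued by io_wq_flush()) bypasses the handlers entirely, so the callbacks are only ever invoked on items the caller queued itself.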