author     Linus Torvalds <torvalds@linux-foundation.org>  2021-01-16 20:12:02 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-01-16 20:12:02 +0100
commit     11c0239ae26450709d37e0d7f658aa0875047229 (patch)
tree       103743e0b8974dd3c43c876643e809bf7bb973b7 /fs
parent     Merge tag 'riscv-for-linus-5.11-rc4' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent     io_uring: ensure finish_wait() is always called in __io_uring_task_cancel() (diff)
Merge tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
"We still have a pending fix for a cancelation issue, but it's still
being investigated. In the meantime:
- Dead mm handling fix (Pavel)
- SQPOLL setup error handling (Pavel)
- Flush timeout sequence fix (Marcelo)
- Missing finish_wait() for one exit case"
* tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block:
io_uring: ensure finish_wait() is always called in __io_uring_task_cancel()
io_uring: flush timeouts that should already have expired
io_uring: do sqo disable on install_fd error
io_uring: fix null-deref in io_disable_sqo_submit
io_uring: don't take files/mm for a dead task
io_uring: drop mm and files after task_work_run
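
The finish_wait() commit at the top of this list corresponds to the hunk at @@ -9067,6 +9101,7 @@ below, which adds one unconditional finish_wait() after the cancel loop. The following is a minimal userspace model, not kernel code: prepare()/finish() are stand-ins for prepare_to_wait()/finish_wait(), and the loop shape assumes the cancel loop can exit via break right after a pass that took the retry path and skipped its finish_wait().

#include <stdio.h>

static int armed;  /* prepare() calls that have no matching finish() yet */

static void prepare(void) { armed++; }
static void finish(void)  { if (armed) armed--; }

int main(void)
{
	int inflight = 1;       /* pretend one request is still in flight */
	int pass = 0;

	do {
		if (!inflight)
			break;  /* exit taken right after a retry pass:
				 * that pass's prepare() is still armed */
		prepare();
		if (pass++ == 0) {
			inflight = 0;   /* a completion raced in: retry */
			continue;       /* skips the finish() below */
		}
		finish();
	} while (1);

	finish();  /* the fix: one unconditional finish() after the loop */

	printf("unmatched prepare() calls: %d\n", armed);  /* prints 0 */
	return 0;
}

Without the final finish() after the loop, the model exits with one armed prepare() left over, which is the imbalance the patch closes.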
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  46
1 files changed, 41 insertions, 5 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2f305c097bd5..985a9e3f976d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -354,6 +354,7 @@ struct io_ring_ctx {
 		unsigned		cq_entries;
 		unsigned		cq_mask;
 		atomic_t		cq_timeouts;
+		unsigned		cq_last_tm_flush;
 		unsigned long		cq_check_overflow;
 		struct wait_queue_head	cq_wait;
 		struct fasync_struct	*cq_fasync;
@@ -1106,6 +1107,9 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
+
 	if (!current->files) {
 		struct files_struct *files;
 		struct nsproxy *nsproxy;
@@ -1133,6 +1137,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	struct mm_struct *mm;
 
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
 	if (current->mm)
 		return 0;
 
@@ -1634,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	while (!list_empty(&ctx->timeout_list)) {
+	u32 seq;
+
+	if (list_empty(&ctx->timeout_list))
+		return;
+
+	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+	do {
+		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
-		if (req->timeout.target_seq != ctx->cached_cq_tail
-					- atomic_read(&ctx->cq_timeouts))
+
+		/*
+		 * Since seq can easily wrap around over time, subtract
+		 * the last seq at which timeouts were flushed before comparing.
+		 * Assuming not more than 2^31-1 events have happened since,
+		 * these subtractions won't have wrapped, so we can check if
+		 * target is in [last_seq, current_seq] by comparing the two.
+		 */
+		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+		events_got = seq - ctx->cq_last_tm_flush;
+		if (events_got < events_needed)
 			break;
 
 		list_del_init(&req->timeout.list);
 		io_kill_timeout(req);
-	}
+	} while (!list_empty(&ctx->timeout_list));
+
+	ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -5832,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
 	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 	req->timeout.target_seq = tail + off;
 
+	/* Update the last seq here in case io_flush_timeouts() hasn't.
+	 * This is safe because ->completion_lock is held, and submissions
+	 * and completions are never mixed in the same ->completion_lock section.
+	 */
+	ctx->cq_last_tm_flush = tail;
+
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
@@ -7056,6 +7087,7 @@ static int io_sq_thread(void *data)
 
 		if (sqt_spin || !time_after(jiffies, timeout)) {
 			io_run_task_work();
+			io_sq_thread_drop_mm_files();
 			cond_resched();
 			if (sqt_spin)
 				timeout = jiffies + sqd->sq_thread_idle;
@@ -7093,6 +7125,7 @@ static int io_sq_thread(void *data)
 	}
 
 	io_run_task_work();
+	io_sq_thread_drop_mm_files();
 
 	if (cur_css)
 		io_sq_thread_unassociate_blkcg();
@@ -8888,7 +8921,8 @@ static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 
 	/* make sure callers enter the ring to get error */
-	io_ring_set_wakeup_flag(ctx);
+	if (ctx->rings)
+		io_ring_set_wakeup_flag(ctx);
 }
 
 /*
@@ -9067,6 +9101,7 @@ void __io_uring_task_cancel(void)
 		finish_wait(&tctx->wait, &wait);
 	} while (1);
 
+	finish_wait(&tctx->wait, &wait);
 	atomic_dec(&tctx->in_idle);
 
 	io_uring_remove_task_files(tctx);
@@ -9700,6 +9735,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	 */
 	ret = io_uring_install_fd(ctx, file);
 	if (ret < 0) {
+		io_disable_sqo_submit(ctx);
 		/* fput will clean it up */
 		fput(file);
 		return ret;
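
For reference, here is a small userspace sketch of the wraparound-safe comparison that the io_flush_timeouts() change above relies on. This is not kernel code: the helper name target_reached() and the sample sequence values are made up for the illustration; only the events_needed/events_got arithmetic mirrors the hunk.

#include <stdio.h>
#include <stdint.h>

/*
 * Returns nonzero if 'target' lies in the window [last_flush, seq],
 * i.e. the timeout's target sequence has already been reached, even if
 * the 32-bit counters wrapped since 'last_flush'. Mirrors the
 * events_needed/events_got check in io_flush_timeouts(); valid as long
 * as fewer than 2^31 - 1 events have happened since last_flush.
 */
static int target_reached(uint32_t target, uint32_t seq, uint32_t last_flush)
{
	uint32_t events_needed = target - last_flush;
	uint32_t events_got    = seq - last_flush;

	return events_got >= events_needed;
}

int main(void)
{
	/* Window straddling a u32 wraparound: last flush near UINT32_MAX. */
	uint32_t last_flush = 0xfffffff0u;
	uint32_t seq        = 0x00000010u;   /* 32 events later, after the wrap */

	printf("%d\n", target_reached(0xfffffff8u, seq, last_flush)); /* 1: already expired */
	printf("%d\n", target_reached(0x00000008u, seq, last_flush)); /* 1: expired after wrap */
	printf("%d\n", target_reached(0x00000020u, seq, last_flush)); /* 0: still in the future */
	return 0;
}

A naive exact-match test against the current sequence, as in the removed lines, would miss the first two cases, which is why timeouts that should already have expired could be left pending.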