author     Linus Torvalds <torvalds@linux-foundation.org>  2022-02-24 20:08:15 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-02-24 20:08:15 +0100
commit     3a5f59b17f9dec448976626663a73841460d7ab4 (patch)
tree       fae81eafb1361a0dd97a1d5c01cc4e019e6bd345 /fs
parent     Merge tag 'platform-drivers-x86-v5.17-4' of git://git.kernel.org/pub/scm/linu... (diff)
parent     io_uring: disallow modification of rsrc_data during quiesce (diff)
download   linux-3a5f59b17f9dec448976626663a73841460d7ab4.tar.xz
           linux-3a5f59b17f9dec448976626663a73841460d7ab4.zip
Merge tag 'io_uring-5.17-2022-02-23' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:

 - Add a conditional schedule point in io_add_buffers() (Eric)

 - Fix for a quiesce speedup merged in this release (Dylan)

 - Don't convert to jiffies for event timeout waiting, it's way too
   coarse when we accept a timespec as input (me)

* tag 'io_uring-5.17-2022-02-23' of git://git.kernel.dk/linux-block:
  io_uring: disallow modification of rsrc_data during quiesce
  io_uring: don't convert to jiffies for waiting on timeouts
  io_uring: add a schedule point in io_add_buffers()
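For context on the timeout item: timespec64_to_jiffies() rounds a user-supplied timespec up to scheduler-tick granularity (10 ms at HZ=100, 4 ms at HZ=250), whereas the absolute-mode schedule_hrtimeout() used after this series honors the request at hrtimer resolution. The probe below is a minimal userspace sketch of how the difference can be observed, assuming liburing is installed; the file name, the 200 us value and the expected timings are illustrative and are not taken from this commit.

/*
 * timeout_probe.c - rough probe of io_uring wait-timeout granularity.
 * Sketch only, assumes liburing is installed: cc timeout_probe.c -luring
 * Waits on an empty ring with a 200us timeout and reports how long the
 * wait really took; a jiffies-based kernel rounds this up to at least
 * one scheduler tick (e.g. ~4ms at HZ=250), a fixed kernel does not.
 */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 200000 };
        struct timespec t0, t1;
        long long waited_ns;
        int ret;

        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0) {
                fprintf(stderr, "queue_init: %s\n", strerror(-ret));
                return 1;
        }

        clock_gettime(CLOCK_MONOTONIC, &t0);
        io_uring_wait_cqe_timeout(&ring, &cqe, &ts);    /* expect -ETIME */
        clock_gettime(CLOCK_MONOTONIC, &t1);

        waited_ns = (t1.tv_sec - t0.tv_sec) * 1000000000LL +
                    (t1.tv_nsec - t0.tv_nsec);
        printf("asked for 200000 ns, waited %lld ns\n", waited_ns);

        io_uring_queue_exit(&ring);
        return 0;
}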
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 77b9c7e4793b..4715980e9015 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4567,6 +4567,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
                 } else {
                         list_add_tail(&buf->list, &(*head)->list);
                 }
+                cond_resched();
         }
 
         return i ? i : -ENOMEM;
@@ -7693,7 +7694,7 @@ static int io_run_task_work_sig(void)
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                                           struct io_wait_queue *iowq,
-                                          signed long *timeout)
+                                          ktime_t timeout)
 {
         int ret;
 
@@ -7705,8 +7706,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
         if (test_bit(0, &ctx->check_cq_overflow))
                 return 1;
 
-        *timeout = schedule_timeout(*timeout);
-        return !*timeout ? -ETIME : 1;
+        if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+                return -ETIME;
+        return 1;
 }
 
 /*
@@ -7719,7 +7721,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 {
         struct io_wait_queue iowq;
         struct io_rings *rings = ctx->rings;
-        signed long timeout = MAX_SCHEDULE_TIMEOUT;
+        ktime_t timeout = KTIME_MAX;
         int ret;
 
         do {
@@ -7735,7 +7737,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
                 if (get_timespec64(&ts, uts))
                         return -EFAULT;
-                timeout = timespec64_to_jiffies(&ts);
+                timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
         }
 
         if (sig) {
@@ -7767,7 +7769,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                 }
                 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                           TASK_INTERRUPTIBLE);
-                ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+                ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
                 finish_wait(&ctx->cq_wait, &iowq.wq);
                 cond_resched();
         } while (ret > 0);
@@ -7924,7 +7926,15 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
                 ret = wait_for_completion_interruptible(&data->done);
                 if (!ret) {
                         mutex_lock(&ctx->uring_lock);
-                        break;
+                        if (atomic_read(&data->refs) > 0) {
+                                /*
+                                 * it has been revived by another thread while
+                                 * we were unlocked
+                                 */
+                                mutex_unlock(&ctx->uring_lock);
+                        } else {
+                                break;
+                        }
                 }
 
                 atomic_inc(&data->refs);
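The final hunk closes a race in io_rsrc_ref_quiesce(): ->uring_lock is dropped while waiting for data->done, so another thread can revive data->refs in the meantime, and the refcount has to be re-checked after the lock is re-taken instead of unconditionally breaking out. The sketch below shows the same general "drop lock, wait, re-lock, re-check" shape with plain pthreads; it is an illustration only, every name in it is invented, and it is not the kernel code.

/*
 * quiesce_recheck.c - illustration of re-checking state after re-locking.
 * Not kernel code; all names invented. Build with: cc quiesce_recheck.c -pthread
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct resource {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        int             refs;   /* > 0 means still in use (or "revived") */
};

static struct resource res = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done = PTHREAD_COND_INITIALIZER,
        .refs = 1,
};

/* Wait until the resource is quiesced (refs == 0). */
static void wait_for_quiesce(struct resource *r)
{
        pthread_mutex_lock(&r->lock);
        while (r->refs > 0) {
                /*
                 * pthread_cond_wait() drops the lock while sleeping and
                 * re-acquires it before returning, so refs must be
                 * re-checked on every wakeup: another thread may have
                 * revived the resource while we were unlocked.
                 */
                pthread_cond_wait(&r->done, &r->lock);
        }
        /* refs == 0 and the lock is held: safe to tear down. */
        pthread_mutex_unlock(&r->lock);
}

static void *user_thread(void *arg)
{
        struct resource *r = arg;

        usleep(1000);                   /* pretend to use the resource */
        pthread_mutex_lock(&r->lock);
        r->refs--;                      /* drop the last reference */
        pthread_cond_signal(&r->done);
        pthread_mutex_unlock(&r->lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, user_thread, &res);
        wait_for_quiesce(&res);
        pthread_join(&t, NULL);
        printf("quiesced\n");
        return 0;
}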