author     Jens Axboe <axboe@kernel.dk>  2020-02-25 16:47:30 +0100
committer  Jens Axboe <axboe@kernel.dk>  2020-02-25 16:57:37 +0100
commit     3030fd4cb783377eca0e2a3eee63724a5c66ee15
tree       6949a688812bbf7a87aaea690873598364e4eb2a /fs/io-wq.c
parent     io_uring: fix poll_list race for SETUP_IOPOLL|SETUP_SQPOLL
io-wq: remove spin-for-work optimization
Andres reports that buffered IO seems to suck up more cycles than we
would like, and he narrowed it down to the fact that the io-wq workers
will briefly spin for more work on completion of a work item. This was
a win on the networking side, but apparently some other cases take a
hit because of it. Remove the optimization to avoid burning more CPU
than we have to for disk IO.

Reported-by: Andres Freund <andres@anarazel.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
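As a rough illustration of the trade-off this patch removes, here is a
self-contained user-space C sketch of spin-before-sleep. The names
(work_pending, spin_for_work, worker) are made up for illustration;
this is not the io-wq code.

/*
 * spin_then_sleep.c: a user-space sketch of the spin-for-work idea.
 * All names are illustrative. Build: cc -pthread spin_then_sleep.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int work_pending;

/*
 * Busy-poll the queue for a bounded number of iterations before
 * giving up. Returns 1 if work arrived during the spin window and
 * 0 if the caller should block instead. When work rarely arrives
 * that quickly (disk IO completions, say), the loop is pure CPU burn.
 */
static int spin_for_work(void)
{
	for (int i = 0; i < 1000; i++) {
		if (atomic_load(&work_pending))
			return 1;	/* skipped a sleep/wake round trip */
	}
	return 0;
}

static void *worker(void *arg)
{
	(void)arg;
	while (!spin_for_work())
		usleep(100);		/* stand-in for blocking until woken */
	puts("worker: picked up work");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	usleep(1000);			/* producer arrives after the spin */
	atomic_store(&work_pending, 1);
	pthread_join(t, NULL);
	return 0;
}

The spin only pays off when new work tends to show up inside the
window, as it apparently did for networking; for buffered disk IO it
mostly does not, so those iterations are wasted cycles.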
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r--  fs/io-wq.c  19
1 file changed, 0 insertions, 19 deletions
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 0a5ab1a8f69a..bf8ed1b0b90a 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -535,42 +535,23 @@ next:
 	} while (1);
 }
 
-static inline void io_worker_spin_for_work(struct io_wqe *wqe)
-{
-	int i = 0;
-
-	while (++i < 1000) {
-		if (io_wqe_run_queue(wqe))
-			break;
-		if (need_resched())
-			break;
-		cpu_relax();
-	}
-}
-
 static int io_wqe_worker(void *data)
 {
 	struct io_worker *worker = data;
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
-	bool did_work;
 
 	io_worker_start(wqe, worker);
 
-	did_work = false;
 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 loop:
-		if (did_work)
-			io_worker_spin_for_work(wqe);
 		spin_lock_irq(&wqe->lock);
 		if (io_wqe_run_queue(wqe)) {
 			__set_current_state(TASK_RUNNING);
 			io_worker_handle_work(worker);
-			did_work = true;
 			goto loop;
 		}
-		did_work = false;
 		/* drops the lock on success, retry */
 		if (__io_worker_idle(wqe, worker)) {
 			__release(&wqe->lock);