| author | Jens Axboe <axboe@kernel.dk> | 2018-11-26 16:24:43 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2018-11-26 16:25:53 +0100 |
| commit | 0a1b8b87d064a47fad9ec475316002da28559207 (patch) | |
| tree | 9bc87a52b3fcc1f476d52ae94d6bb7e69e2bfd94 /fs/block_dev.c | |
| parent | nvme: remove opportunistic polling from bdev target (diff) | |
block: make blk_poll() take a parameter on whether to spin or not
blk_poll() has always kept spinning until it found an IO. This is
fine for SYNC polling, since we need to find one request we have
pending, but in preparation for ASYNC polling it can be beneficial
to just check if we have any entries available or not.
Existing callers are converted to pass in 'spin == true', to retain
the old behavior.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
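The spin flag is easiest to see outside the kernel. Below is a minimal, self-contained userspace sketch of the semantics described above (the struct fake_queue and poll_queue names are hypothetical stand-ins, not part of the kernel API): a sync-style caller passes spin == true and keeps polling until its own cookie completes, whereas an async-style caller can pass spin == false to make a single pass and merely learn whether anything completed.

```c
/*
 * Illustrative userspace model only -- not kernel code. It mimics the
 * spin semantics described in the commit message: spin == true keeps
 * polling until the request identified by 'cookie' is found, while
 * spin == false makes one pass and only reports whether any completion
 * is available.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	int completed[8];	/* cookies of requests that have completed */
	int nr_completed;
};

/* One pass over the completion list; true if 'cookie' has completed. */
static bool found_cookie(const struct fake_queue *q, int cookie)
{
	for (int i = 0; i < q->nr_completed; i++)
		if (q->completed[i] == cookie)
			return true;
	return false;
}

/* Hypothetical analogue of blk_poll(q, cookie, spin). */
static bool poll_queue(const struct fake_queue *q, int cookie, bool spin)
{
	if (!spin)
		/* Async-style poll: one look, did anything complete at all? */
		return q->nr_completed > 0;

	/*
	 * Sync-style poll: keep going until our request shows up. A real
	 * driver would reap fresh hardware completions on every iteration;
	 * here the list is static, so only call this with a cookie that is
	 * already present.
	 */
	while (!found_cookie(q, cookie))
		;
	return true;
}

int main(void)
{
	struct fake_queue q = { .completed = { 7 }, .nr_completed = 1 };

	/* Sync waiter (spin == true): cookie 7 is done, returns immediately. */
	printf("spin poll, cookie 7: %d\n", poll_queue(&q, 7, true));

	/* Async-style check (spin == false): one pass, something completed. */
	printf("one-shot poll:       %d\n", poll_queue(&q, 9, false));

	return 0;
}
```

In the patch itself, both __blkdev_direct_IO_simple() and __blkdev_direct_IO() are sync waiters, which is why the diff below passes true.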
Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--	fs/block_dev.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 64ba27b8b754..d233a59ea364 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -243,7 +243,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 			break;

 		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc))
+		    !blk_poll(bdev_get_queue(bdev), qc, true))
 			io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
@@ -423,7 +423,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			break;

 		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc))
+		    !blk_poll(bdev_get_queue(bdev), qc, true))
 			io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);