author     Jens Axboe <axboe@kernel.dk>  2018-11-16 16:37:34 +0100
committer  Jens Axboe <axboe@kernel.dk>  2018-11-19 16:34:49 +0100
commit     849a370016a5489c49253338507ee6cc4a08df4b
tree       3e8794bb4312b0fa73e3092849b8b6e163437a2d
parent     nvme: default to 0 poll queues
block: avoid ordered task state change for polled IO
For the core poll helper, the task state setting doesn't need to imply any atomics, as it's the current task itself that is being modified and we're not going to sleep.

For IRQ-driven IO, the wakeup path has the necessary barriers, so we don't need the heavy-handed version of the task state setting.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
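
For reference, the distinction the message relies on, sketched after the include/linux/sched.h definitions of that era (modulo debug config such as CONFIG_DEBUG_ATOMIC_SLEEP):

	/*
	 * set_current_state() pairs the store with a full memory barrier
	 * (smp_store_mb()), so a concurrent waker that sees a condition we
	 * wrote also sees our task state.  __set_current_state() is a plain
	 * store, fine when no such cross-CPU ordering is needed.
	 */
	#define __set_current_state(state_value)	\
		current->state = (state_value)

	#define set_current_state(state_value)		\
		smp_store_mb(current->state, (state_value))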
-rw-r--r--  block/blk-mq.c   4
-rw-r--r--  fs/block_dev.c   7
-rw-r--r--  fs/iomap.c       3
-rw-r--r--  mm/page_io.c     3
4 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32b246ed44c0..7fc4abb4cc36 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3331,12 +3331,12 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
ret = q->mq_ops->poll(hctx, rq->tag);
if (ret > 0) {
hctx->poll_success++;
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
return true;
}
if (signal_pending_state(state, current))
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
if (current->state == TASK_RUNNING)
return true;
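
The polled path above needs no barrier at all: the task sets its own state, then itself polls for completion and checks that state, all on one CPU in program order. An illustrative sketch of that shape (not kernel code; driver_poll_done() is a made-up stand-in for q->mq_ops->poll()):

	for (;;) {
		__set_current_state(TASK_UNINTERRUPTIBLE); /* plain store */
		if (driver_poll_done())	/* we detect completion ourselves */
			break;		/* no waker is involved */
		cpu_relax();
	}
	__set_current_state(TASK_RUNNING); /* own store, seen in program order */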
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4d79bc80fb41..64ba27b8b754 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -237,9 +237,11 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
qc = submit_bio(&bio);
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(bio.bi_private))
break;
+
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!blk_poll(bdev_get_queue(bdev), qc))
io_schedule();
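
For the IRQ-driven case the ordering comes from the completion side instead: the bio end handler clears the waiter pointer and then calls wake_up_process(), and try_to_wake_up() provides the barriers that the plain store gives up. Paraphrased from fs/block_dev.c of the same era (not part of this diff):

	static void blkdev_bio_end_io_simple(struct bio *bio)
	{
		struct task_struct *waiter = bio->bi_private;

		WRITE_ONCE(bio->bi_private, NULL);
		wake_up_process(waiter); /* try_to_wake_up() supplies the barrier */
	}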
@@ -415,7 +417,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
return -EIOCBQUEUED;
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(dio->waiter))
break;
diff --git a/fs/iomap.c b/fs/iomap.c
index b0462b363bad..c5df035ace6f 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1888,7 +1888,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
return -EIOCBQUEUED;
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(dio->submit.waiter))
break;
diff --git a/mm/page_io.c b/mm/page_io.c
index 57572ff46016..a7271fa481f6 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -405,7 +405,8 @@ int swap_readpage(struct page *page, bool synchronous)
bio_get(bio);
qc = submit_bio(bio);
while (synchronous) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(bio->bi_private))
break;