author		Jens Axboe <axboe@kernel.dk>	2018-12-21 17:10:46 +0100
committer	Jens Axboe <axboe@kernel.dk>	2019-02-24 16:20:17 +0100
commit		0bbb280d7b767e7c86a5adfc87c76a6f09ab0423 (patch)
tree		5e2b9970b0144b91c5b8def73827aacca82845f4
parent		block: wire up block device iopoll method (diff)
block: add bio_set_polled() helper
For the upcoming async polled IO, we can't sleep allocating requests.
If we do, then we introduce a deadlock where the submitter already has
async polled IO in-flight, but can't wait for them to complete since
polled requests must be actively found and reaped.

Utilize the helper in the blockdev DIRECT_IO code.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/block_dev.c      |  4 ++--
-rw-r--r--	include/linux/bio.h | 14 ++++++++++++++
2 files changed, 16 insertions(+), 2 deletions(-)
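Why sleeping would deadlock here: completions for polled IO are not
signaled by an IRQ; the submitting task itself must call blk_poll() to
find and reap them. If request allocation slept waiting for an
in-flight polled request to free up, nobody would be polling, so that
request would never complete. A minimal sketch of the find-and-reap
loop, assuming the blk_poll() signature of this kernel series; the
function name example_reap_polled and the done flag are hypothetical,
not part of this patch:

#include <linux/blkdev.h>
#include <linux/compiler.h>

/*
 * Hypothetical helper: spin until our own completion is found. Only
 * the submitter drives progress here, which is why it must never
 * sleep in the allocator while polled IO is pending.
 */
static void example_reap_polled(struct request_queue *q, blk_qc_t cookie,
				bool *done)
{
	while (!READ_ONCE(*done))
		blk_poll(q, cookie, true);	/* actively find and reap */
}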
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1fe498b08f1b..e9faa52bb489 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -248,7 +248,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 		task_io_account_write(ret);
 	}
 	if (iocb->ki_flags & IOCB_HIPRI)
-		bio.bi_opf |= REQ_HIPRI;
+		bio_set_polled(&bio, iocb);
 
 	qc = submit_bio(&bio);
 	for (;;) {
@@ -419,7 +419,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			bool polled = false;
 
 			if (iocb->ki_flags & IOCB_HIPRI) {
-				bio->bi_opf |= REQ_HIPRI;
+				bio_set_polled(bio, iocb);
 				polled = true;
 			}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index bdd11d4c2f05..bb6090aa165d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -826,5 +826,19 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+/*
+ * Mark a bio as polled. Note that for async polled IO, the caller must
+ * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
+ * We cannot block waiting for requests on polled IO, as those completions
+ * must be found by the caller. This is different than IRQ driven IO, where
+ * it's safe to wait for IO to complete.
+ */
+static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
+{
+	bio->bi_opf |= REQ_HIPRI;
+	if (!is_sync_kiocb(kiocb))
+		bio->bi_opf |= REQ_NOWAIT;
+}
+
 #endif /* CONFIG_BLOCK */
 #endif /* __LINUX_BIO_H */
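A hedged usage sketch, not part of this patch: how an async submission
path might combine bio_set_polled() with a completion handler. With
REQ_NOWAIT set on the bio, a request allocation that cannot proceed
without blocking ends the bio with BLK_STS_AGAIN, which
blk_status_to_errno() maps to -EAGAIN so the caller can retry. The
names example_end_io and example_submit are hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_end_io(struct bio *bio)
{
	struct kiocb *iocb = bio->bi_private;

	/*
	 * BLK_STS_AGAIN becomes -EAGAIN here: the request could not be
	 * allocated without blocking, so the caller should retry. A
	 * real path would also report bytes transferred on success.
	 */
	iocb->ki_complete(iocb, blk_status_to_errno(bio->bi_status), 0);
	bio_put(bio);
}

static blk_qc_t example_submit(struct kiocb *iocb, struct bio *bio)
{
	bio->bi_private = iocb;
	bio->bi_end_io = example_end_io;

	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, iocb);	/* async kiocb also gets REQ_NOWAIT */

	return submit_bio(bio);
}

An async kiocb is one with ki_complete set (is_sync_kiocb() returns
false), which is exactly the case where the helper adds REQ_NOWAIT.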