author		Matthew Wilcox (Oracle) <willy@infradead.org>	2020-09-21 17:58:40 +0200
committer	Darrick J. Wong <darrick.wong@oracle.com>	2020-09-21 17:59:26 +0200
commit		7d636676d2841ba5d92462dfa99a89c2f2da8919 (patch)
tree		2576896da6201cf60b064b2c6f165c17093bc3a7 /fs/iomap
parent		iomap: Support arbitrarily many blocks per page (diff)
iomap: Convert read_count to read_bytes_pending
Instead of counting bio segments, count the number of bytes submitted.
This insulates us from the block layer's definition of what a 'same page'
is, which is not necessarily clear once THPs are involved.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
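[Editor's note: as a rough illustration of the scheme the patch switches to, here is a minimal standalone C11 sketch. Every submission adds its byte count before the I/O is issued, and whichever completion brings the counter back to zero unlocks the page. The names (demo_page, demo_add_pending, demo_end_io) are made up for the demo and are not the kernel API.]

/*
 * Minimal userspace model of the byte-counting scheme (C11).
 * All names here are illustrative only, not the kernel's API.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_page {
	atomic_uint read_bytes_pending;	/* bytes of read I/O in flight */
	int locked;			/* 1 while reads are outstanding */
};

/* Submission side: account for len bytes before issuing the I/O, so an
 * early completion cannot drop the counter to zero prematurely. */
static void demo_add_pending(struct demo_page *p, unsigned int len)
{
	atomic_fetch_add(&p->read_bytes_pending, len);
}

/* Completion side: the completion that brings the counter to zero
 * "unlocks" the page, mirroring atomic_sub_and_test() in the patch. */
static void demo_end_io(struct demo_page *p, unsigned int len)
{
	/* fetch_sub returns the old value: old == len means new == 0 */
	if (atomic_fetch_sub(&p->read_bytes_pending, len) == len) {
		p->locked = 0;
		printf("page unlocked after final %u bytes\n", len);
	}
}

int main(void)
{
	struct demo_page page = { .locked = 1 };

	atomic_init(&page.read_bytes_pending, 0);

	/* Two 2048-byte segments of one 4096-byte page are submitted. */
	demo_add_pending(&page, 2048);
	demo_add_pending(&page, 2048);

	/* Completions may arrive in any order; only the last unlocks. */
	demo_end_io(&page, 2048);
	demo_end_io(&page, 2048);
	return page.locked;	/* 0 on success */
}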
Diffstat (limited to 'fs/iomap')
-rw-r--r--	fs/iomap/buffered-io.c	41
1 file changed, 12 insertions(+), 29 deletions(-)
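[Editor's note: when reading the last hunk below, note that the new atomic_add() of plen is placed before the bio merge/submit logic, preserving the invariant the deleted comment described: the counter must be raised before any previous full bio is submitted, or its completion could unlock the page prematurely. Byte counting also makes the 'same_page' result of __bio_try_merge_page() irrelevant here: whether or not the merge creates a new segment, the submitted bytes have already been accounted for.]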
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 9f0fa495ab69..46370cea30c8 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -26,7 +26,7 @@
  * to track sub-page uptodate status and I/O completions.
  */
 struct iomap_page {
-	atomic_t		read_count;
+	atomic_t		read_bytes_pending;
 	atomic_t		write_count;
 	spinlock_t		uptodate_lock;
 	unsigned long		uptodate[];
@@ -72,7 +72,7 @@ iomap_page_release(struct page *page)
 
 	if (!iop)
 		return;
-	WARN_ON_ONCE(atomic_read(&iop->read_count));
+	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_count));
 	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
 			PageUptodate(page));
@@ -168,13 +168,6 @@ iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 }
 
 static void
-iomap_read_finish(struct iomap_page *iop, struct page *page)
-{
-	if (!iop || atomic_dec_and_test(&iop->read_count))
-		unlock_page(page);
-}
-
-static void
 iomap_read_page_end_io(struct bio_vec *bvec, int error)
 {
 	struct page *page = bvec->bv_page;
@@ -187,7 +180,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
 		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
 	}
 
-	iomap_read_finish(iop, page);
+	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
+		unlock_page(page);
 }
 
 static void
@@ -267,30 +261,19 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	}
 
 	ctx->cur_page_in_bio = true;
+	if (iop)
+		atomic_add(plen, &iop->read_bytes_pending);
 
-	/*
-	 * Try to merge into a previous segment if we can.
-	 */
+	/* Try to merge into a previous segment if we can */
 	sector = iomap_sector(iomap, pos);
-	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
+	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
+		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
+				&same_page))
+			goto done;
 		is_contig = true;
-
-	if (is_contig &&
-	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
-		if (!same_page && iop)
-			atomic_inc(&iop->read_count);
-		goto done;
 	}
 
-	/*
-	 * If we start a new segment we need to increase the read count, and we
-	 * need to do so before submitting any previous full bio to make sure
-	 * that we don't prematurely unlock the page.
-	 */
-	if (iop)
-		atomic_inc(&iop->read_count);
-
-	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
+	if (!is_contig || bio_full(ctx->bio, plen)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
 		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;