author	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 02:26:56 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 02:26:56 +0100
commit	6b1f86f8e9c7f9de7ca1cb987b2cf25e99b1ae3a (patch)
tree	af9dd0afd9ae6f044db13f8b6f7688d9b1fd1402 /fs/iomap
parent	Merge tag 'folio-5.18c' of git://git.infradead.org/users/willy/pagecache (diff)
parent	fs: Remove aops ->set_page_dirty (diff)
Merge tag 'folio-5.18b' of git://git.infradead.org/users/willy/pagecache
Pull filesystem folio updates from Matthew Wilcox:
 "Primarily this series converts some of the address_space operations to take a folio instead of a page.

  Notably:

   - a_ops->is_partially_uptodate() takes a folio instead of a page and changes the type of the 'from' and 'count' arguments to make it obvious they're bytes.

   - a_ops->invalidatepage() becomes ->invalidate_folio() and has a similar type change.

   - a_ops->launder_page() becomes ->launder_folio()

   - a_ops->set_page_dirty() becomes ->dirty_folio() and adds the address_space as an argument.

  There are a couple of other misc changes up front that weren't worth separating into their own pull request"

* tag 'folio-5.18b' of git://git.infradead.org/users/willy/pagecache: (53 commits)
  fs: Remove aops ->set_page_dirty
  fb_defio: Use noop_dirty_folio()
  fs: Convert __set_page_dirty_no_writeback to noop_dirty_folio
  fs: Convert __set_page_dirty_buffers to block_dirty_folio
  nilfs: Convert nilfs_set_page_dirty() to nilfs_dirty_folio()
  mm: Convert swap_set_page_dirty() to swap_dirty_folio()
  ubifs: Convert ubifs_set_page_dirty to ubifs_dirty_folio
  f2fs: Convert f2fs_set_node_page_dirty to f2fs_dirty_node_folio
  f2fs: Convert f2fs_set_data_page_dirty to f2fs_dirty_data_folio
  f2fs: Convert f2fs_set_meta_page_dirty to f2fs_dirty_meta_folio
  afs: Convert afs_dir_set_page_dirty() to afs_dir_dirty_folio()
  btrfs: Convert extent_range_redirty_for_io() to use folios
  fs: Convert trivial uses of __set_page_dirty_nobuffers to filemap_dirty_folio
  btrfs: Convert from set_page_dirty to dirty_folio
  fscache: Convert fscache_set_page_dirty() to fscache_dirty_folio()
  fs: Add aops->dirty_folio
  fs: Remove aops->launder_page
  orangefs: Convert launder_page to launder_folio
  nfs: Convert from launder_page to launder_folio
  fuse: Convert from launder_page to launder_folio
  ...
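For reference, a minimal sketch (not part of this commit) of how a filesystem would wire up the new folio-based callbacks described above; the myfs_* names are hypothetical, while filemap_dirty_folio() and iomap_is_partially_uptodate() are helpers this series converts or introduces.

/* Illustrative only -- hypothetical myfs_* filesystem against the 5.18 APIs. */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>

/* ->set_page_dirty(page) becomes ->dirty_folio(mapping, folio). */
static bool myfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	/* trivial users can simply defer to filemap_dirty_folio() */
	return filemap_dirty_folio(mapping, folio);
}

/* ->invalidatepage(page, uint, uint) becomes ->invalidate_folio(folio, size_t, size_t). */
static void myfs_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	/* offset and len are byte counts within the folio, no longer capped at PAGE_SIZE */
}

static const struct address_space_operations myfs_aops = {
	.dirty_folio		= myfs_dirty_folio,
	.invalidate_folio	= myfs_invalidate_folio,
	/* now takes a folio plus byte-sized (size_t) from/count arguments */
	.is_partially_uptodate	= iomap_is_partially_uptodate,
};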
Diffstat (limited to 'fs/iomap')
-rw-r--r--	fs/iomap/buffered-io.c	46
-rw-r--r--	fs/iomap/trace.h	2
2 files changed, 19 insertions(+), 29 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 4653f3d07a1d..dffd2cac8113 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -425,37 +425,33 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
EXPORT_SYMBOL_GPL(iomap_readahead);
/*
- * iomap_is_partially_uptodate checks whether blocks within a page are
+ * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
- * Returns true if all blocks which correspond to a file portion
- * we want to read within the page are uptodate.
+ * Returns true if all blocks which correspond to the specified part
+ * of the folio are uptodate.
 */
-int
-iomap_is_partially_uptodate(struct page *page, unsigned long from,
-		unsigned long count)
+bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
-	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
-	struct inode *inode = page->mapping->host;
-	unsigned len, first, last;
-	unsigned i;
+	struct inode *inode = folio->mapping->host;
+	size_t len;
+	unsigned first, last, i;
+
+	if (!iop)
+		return false;

-	/* Limit range to one page */
-	len = min_t(unsigned, PAGE_SIZE - from, count);
+	/* Limit range to this folio */
+	len = min(folio_size(folio) - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

-	if (iop) {
-		for (i = first; i <= last; i++)
-			if (!test_bit(i, iop->uptodate))
-				return 0;
-		return 1;
-	}
-
-	return 0;
+	for (i = first; i <= last; i++)
+		if (!test_bit(i, iop->uptodate))
+			return false;
+	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
@@ -481,7 +477,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
-	trace_iomap_invalidatepage(folio->mapping->host, offset, len);
+	trace_iomap_invalidate_folio(folio->mapping->host,
+			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
@@ -500,13 +497,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

-void iomap_invalidatepage(struct page *page, unsigned int offset,
-		unsigned int len)
-{
-	iomap_invalidate_folio(page_folio(page), offset, len);
-}
-EXPORT_SYMBOL_GPL(iomap_invalidatepage);
-
#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 65e39785c284..a6689a563c6e 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -81,7 +81,7 @@ DEFINE_EVENT(iomap_range_class, name, \
	TP_ARGS(inode, off, len))
DEFINE_RANGE_EVENT(iomap_writepage);
DEFINE_RANGE_EVENT(iomap_releasepage);
-DEFINE_RANGE_EVENT(iomap_invalidatepage);
+DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
#define IOMAP_TYPE_STRINGS \