author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2021-03-19 13:58:36 +0100
committer | Matthew Wilcox (Oracle) <willy@infradead.org> | 2022-01-03 02:28:49 +0100
commit | 22b3c8d6612e09f5fcecba1009d399aaf7f934f6
tree | 2d4aca790c22b52ecb0cd648860e2be273b7c682 /fs/fs-writeback.c
parent | filemap: Remove PageHWPoison check from next_uptodate_page()
fs/writeback: Convert inode_switch_wbs_work_fn to folios
This gets the statistics correct for large folios by modifying the
counters by the number of pages in the folio instead of by 1.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r-- | fs/fs-writeback.c | 24
1 file changed, 13 insertions, 11 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 67f0e88eed01..4f680f848c8b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -372,7 +372,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
 {
 	struct address_space *mapping = inode->i_mapping;
 	XA_STATE(xas, &mapping->i_pages, 0);
-	struct page *page;
+	struct folio *folio;
 	bool switched = false;
 
 	spin_lock(&inode->i_lock);
@@ -389,21 +389,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
 
 	/*
 	 * Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
-	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
-	 * pages actually under writeback.
+	 * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to
+	 * folios actually under writeback.
 	 */
-	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
-		if (PageDirty(page)) {
-			dec_wb_stat(old_wb, WB_RECLAIMABLE);
-			inc_wb_stat(new_wb, WB_RECLAIMABLE);
+	xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+		if (folio_test_dirty(folio)) {
+			long nr = folio_nr_pages(folio);
+			wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr);
+			wb_stat_mod(new_wb, WB_RECLAIMABLE, nr);
 		}
 	}
 
 	xas_set(&xas, 0);
-	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
-		WARN_ON_ONCE(!PageWriteback(page));
-		dec_wb_stat(old_wb, WB_WRITEBACK);
-		inc_wb_stat(new_wb, WB_WRITEBACK);
+	xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+		long nr = folio_nr_pages(folio);
+		WARN_ON_ONCE(!folio_test_writeback(folio));
+		wb_stat_mod(old_wb, WB_WRITEBACK, -nr);
+		wb_stat_mod(new_wb, WB_WRITEBACK, nr);
 	}
 
 	if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
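For illustration only (not part of the commit): a minimal user-space C sketch contrasting the old per-entry accounting with the per-folio accounting above. struct folio_stub and the two counting helpers are made-up stand-ins for the kernel's folio, folio_nr_pages(), inc_wb_stat() and wb_stat_mod() machinery, used here only to show why counting by 1 undercounts large folios.

/*
 * Stand-alone sketch, not kernel code. Models a writeback-switch pass
 * over dirty "folios" and compares the counter updates.
 */
#include <stdio.h>

struct folio_stub {
	long nr_pages;	/* stand-in for folio_nr_pages() */
	int dirty;	/* stand-in for folio_test_dirty() */
};

/* Old scheme: each dirty entry bumps the counter by exactly 1. */
static long reclaimable_old(const struct folio_stub *f, int n)
{
	long wb_reclaimable = 0;

	for (int i = 0; i < n; i++)
		if (f[i].dirty)
			wb_reclaimable += 1;	/* like inc_wb_stat() */
	return wb_reclaimable;
}

/* New scheme: each dirty entry bumps the counter by its page count. */
static long reclaimable_new(const struct folio_stub *f, int n)
{
	long wb_reclaimable = 0;

	for (int i = 0; i < n; i++)
		if (f[i].dirty)
			wb_reclaimable += f[i].nr_pages;	/* like wb_stat_mod(..., nr) */
	return wb_reclaimable;
}

int main(void)
{
	/* One 16-page dirty folio plus two single-page dirty folios. */
	struct folio_stub folios[] = {
		{ .nr_pages = 16, .dirty = 1 },
		{ .nr_pages = 1,  .dirty = 1 },
		{ .nr_pages = 1,  .dirty = 1 },
	};
	int n = (int)(sizeof(folios) / sizeof(folios[0]));

	printf("old WB_RECLAIMABLE: %ld pages\n", reclaimable_old(folios, n));
	printf("new WB_RECLAIMABLE: %ld pages\n", reclaimable_new(folios, n));
	return 0;
}

With this input the old scheme reports 3 pages while the new scheme reports 18, which is the point of the commit message: the counters now reflect the actual number of pages backing each folio.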