author    Matthew Wilcox (Oracle) <willy@infradead.org>  2021-04-27 05:53:10 +0200
committer Matthew Wilcox (Oracle) <willy@infradead.org>  2021-10-18 13:49:39 +0200
commit    b5e84594cafb934e023ee7d4ea4208b6c6b65fcc (patch)
tree      ad0e351763d2e8086850d2fc257eb529b22febd4 /mm/page-writeback.c
parent    mm/writeback: Add folio_start_writeback() (diff)
mm/writeback: Add folio_mark_dirty()
Reimplement set_page_dirty() as a wrapper around folio_mark_dirty(). There is
no change to filesystems, as they were already being called with the
compound_head of the page being marked dirty.

We avoid several calls to compound_head(), both statically (by using
folio_test_dirty() instead of PageDirty()) and dynamically (by calling
folio_mapping() instead of page_mapping()). Also return bool instead of int
to reflect the range of values actually returned, and add kernel-doc.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
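For reference, legacy callers are unaffected because set_page_dirty() becomes a
thin compatibility wrapper over the new function. A minimal sketch of what that
wrapper looks like (illustrative; in the series the wrapper lives outside this
file, in a compat source such as mm/folio-compat.c, and page_folio() converts a
page to its containing folio):

	/* Sketch of the compatibility wrapper: legacy page-based callers
	 * keep calling set_page_dirty(), which forwards to the folio API. */
	bool set_page_dirty(struct page *page)
	{
		return folio_mark_dirty(page_folio(page));
	}
	EXPORT_SYMBOL(set_page_dirty);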
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 82938b037103..c79801656f5b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2581,18 +2581,21 @@ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
}
EXPORT_SYMBOL(redirty_page_for_writepage);
-/*
- * Dirty a page.
+/**
+ * folio_mark_dirty - Mark a folio as being modified.
+ * @folio: The folio.
+ *
+ * For folios with a mapping this should be done under the page lock
+ * for the benefit of asynchronous memory errors who prefer a consistent
+ * dirty state. This rule can be broken in some special cases,
+ * but should be better not to.
*
- * For pages with a mapping this should be done under the page lock for the
- * benefit of asynchronous memory errors who prefer a consistent dirty state.
- * This rule can be broken in some special cases, but should be better not to.
+ * Return: True if the folio was newly dirtied, false if it was already dirty.
*/
-int set_page_dirty(struct page *page)
+bool folio_mark_dirty(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = folio_mapping(folio);
 
-	page = compound_head(page);
 	if (likely(mapping)) {
 		/*
 		 * readahead/lru_deactivate_page could remain
@@ -2604,17 +2607,17 @@ int set_page_dirty(struct page *page)
 		 * it will confuse readahead and make it restart the size rampup
 		 * process. But it's a trivial problem.
 		 */
-		if (PageReclaim(page))
-			ClearPageReclaim(page);
-		return mapping->a_ops->set_page_dirty(page);
+		if (folio_test_reclaim(folio))
+			folio_clear_reclaim(folio);
+		return mapping->a_ops->set_page_dirty(&folio->page);
 	}
-	if (!PageDirty(page)) {
-		if (!TestSetPageDirty(page))
-			return 1;
+	if (!folio_test_dirty(folio)) {
+		if (!folio_test_set_dirty(folio))
+			return true;
 	}
-	return 0;
+	return false;
 }
-EXPORT_SYMBOL(set_page_dirty);
+EXPORT_SYMBOL(folio_mark_dirty);
/*
* set_page_dirty() is racy if the caller has no reference against
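As the new kernel-doc notes, a folio with a mapping should normally be dirtied
under the folio lock. A purely illustrative caller sketch (example_dirty_folio()
is a hypothetical helper; folio_lock()/folio_unlock() are the folio locking
helpers from the same conversion effort):

	static void example_dirty_folio(struct folio *folio)
	{
		/* Hold the folio lock so asynchronous memory errors see a
		 * consistent dirty state, per the rule documented above. */
		folio_lock(folio);
		folio_mark_dirty(folio);
		folio_unlock(folio);
	}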