diff options
| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-06-12 23:01:34 +0200 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-06-20 01:19:31 +0200 |
| commit | fe181377a23cce5987fc32f1877cfcd223561609 (patch) | |
| tree | 72e706063b9f7ec7c31d071435808b4e79f25012 /fs/buffer.c | |
| parent | buffer: make block_write_full_page() handle large folios correctly (diff) | |
| download | linux-fe181377a23cce5987fc32f1877cfcd223561609.tar.xz linux-fe181377a23cce5987fc32f1877cfcd223561609.zip |
buffer: convert block_page_mkwrite() to use a folio
If any page in a folio is dirtied, dirty the entire folio. Removes a
number of hidden calls to compound_head() and references to page->mapping
and page->index. Fixes a pre-existing bug where we could mark a folio as
dirty if the file is truncated to a multiple of the page size just as we
take the page fault. I don't believe this bug has any bad effect; it's
just inefficient.
Link: https://lkml.kernel.org/r/20230612210141.730128-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- | fs/buffer.c | 27 |
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c index 34ecf55d2f12..0af167e8a9c6 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2564,38 +2564,37 @@ EXPORT_SYMBOL(block_commit_write); int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) { - struct page *page = vmf->page; + struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vma->vm_file); unsigned long end; loff_t size; int ret; - lock_page(page); + folio_lock(folio); size = i_size_read(inode); - if ((page->mapping != inode->i_mapping) || - (page_offset(page) > size)) { + if ((folio->mapping != inode->i_mapping) || + (folio_pos(folio) >= size)) { /* We overload EFAULT to mean page got truncated */ ret = -EFAULT; goto out_unlock; } - /* page is wholly or partially inside EOF */ - if (((page->index + 1) << PAGE_SHIFT) > size) - end = size & ~PAGE_MASK; - else - end = PAGE_SIZE; + end = folio_size(folio); + /* folio is wholly or partially inside EOF */ + if (folio_pos(folio) + end > size) + end = size - folio_pos(folio); - ret = __block_write_begin(page, 0, end, get_block); + ret = __block_write_begin_int(folio, 0, end, get_block, NULL); if (!ret) - ret = block_commit_write(page, 0, end); + ret = block_commit_write(&folio->page, 0, end); if (unlikely(ret < 0)) goto out_unlock; - set_page_dirty(page); - wait_for_stable_page(page); + folio_mark_dirty(folio); + folio_wait_stable(folio); return 0; out_unlock: - unlock_page(page); + folio_unlock(folio); return ret; } EXPORT_SYMBOL(block_page_mkwrite); |