author:    David Howells <dhowells@redhat.com>  2021-08-11 10:49:13 +0200
committer: David Howells <dhowells@redhat.com>  2021-11-10 22:16:56 +0100
commit:    78525c74d9e7d1a6ce69bd4388f045f6e474a20b
tree:      53b2a3a3f9577e71c98d0654ad5d816720f4bee2 /fs/9p
parent:    folio: Add a function to get the host inode for a folio
netfs, 9p, afs, ceph: Use folios
Convert the netfs helper library to use folios throughout, convert the 9p
and afs filesystems to use folios in their file I/O paths, and convert the
ceph filesystem to use just enough folios to compile.
With these changes, afs passes -g quick xfstests.
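
The conversion has the same mechanical shape at every legacy entry point:
the VFS still hands in a struct page, so the function immediately resolves
the folio that owns it and uses folio_* operations from then on.  A minimal
sketch of that idiom, modelled on the 9p hunks below (illustrative only,
not part of the patch):

	/* Illustrative sketch: the page -> folio idiom applied at each
	 * address_space op that still takes a struct page. */
	static void example_invalidate_page(struct page *page,
					    unsigned int offset,
					    unsigned int length)
	{
		struct folio *folio = page_folio(page);	/* owning folio */

		folio_wait_fscache(folio);	/* was wait_on_page_fscache(page) */
	}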
Changes
=======
ver #5:
- Got rid of folio_end{io,_read,_write}() and inlined what they did
  instead (Willy decided he didn't want these helpers after all).
ver #4:
- Fixed a bug in afs_redirty_page() whereby it didn't set the next page
  index in the loop and returned too early (see the sketch after this
  changelog).
- Simplified a check in v9fs_vfs_write_folio_locked()[1].
- Undid a change to afs_symlink_readpage()[1].
- Used offset_in_folio() in afs_write_end()[1].
- Changed from using page_endio() to folio_end{io,_read,_write}()[1].
ver #2:
- Added 9p foliation.
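
For reference, the ver #4 afs_redirty_page() fix is essentially a
loop-shape change.  A hypothetical sketch (afs is outside the fs/9p diff
below, and lookup_folio() is an illustrative stand-in, not a real API):

	/* Hypothetical sketch of the ver #4 fix: advance past every page
	 * the current folio covers and keep scanning, instead of
	 * revisiting the same index or returning after the first folio. */
	static void redirty_range(struct address_space *mapping,
				  pgoff_t index, pgoff_t last)
	{
		struct folio *folio;

		while (index <= last) {
			folio = lookup_folio(mapping, index); /* stand-in */
			if (!folio)
				break;
			folio_mark_dirty(folio);
			/* The bug: without this advance the loop never
			 * moved on (or bailed out too early). */
			index = folio->index + folio_nr_pages(folio);
			folio_put(folio);
		}
	}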
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Tested-by: Jeff Layton <jlayton@kernel.org>
Tested-by: Dominique Martinet <asmadeus@codewreck.org>
Tested-by: kafs-testing@auristor.com
cc: Matthew Wilcox (Oracle) <willy@infradead.org>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: v9fs-developer@lists.sourceforge.net
cc: linux-afs@lists.infradead.org
cc: ceph-devel@vger.kernel.org
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/YYKa3bfQZxK5/wDN@casper.infradead.org/ [1]
Link: https://lore.kernel.org/r/2408234.1628687271@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/162877311459.3085614.10601478228012245108.stgit@warthog.procyon.org.uk/
Link: https://lore.kernel.org/r/162981153551.1901565.3124454657133703341.stgit@warthog.procyon.org.uk/
Link: https://lore.kernel.org/r/163005745264.2472992.9852048135392188995.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/163584187452.4023316.500389675405550116.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/163649328026.309189.1124218109373941936.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/163657852454.834781.9265101983152100556.stgit@warthog.procyon.org.uk/ # v5
Diffstat (limited to 'fs/9p')
 fs/9p/vfs_addr.c | 83
 fs/9p/vfs_file.c | 20
 2 files changed, 56 insertions(+), 47 deletions(-)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adafdf86f42f..fac918ccb305 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -108,7 +108,9 @@ static const struct netfs_read_request_ops v9fs_req_ops = {
  */
 static int v9fs_vfs_readpage(struct file *file, struct page *page)
 {
-	return netfs_readpage(file, page, &v9fs_req_ops, NULL);
+	struct folio *folio = page_folio(page);
+
+	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
 }
 
 /**
@@ -130,13 +132,15 @@ static void v9fs_vfs_readahead(struct readahead_control *ractl)
 
 static int v9fs_release_page(struct page *page, gfp_t gfp)
 {
-	if (PagePrivate(page))
+	struct folio *folio = page_folio(page);
+
+	if (folio_test_private(folio))
 		return 0;
 #ifdef CONFIG_9P_FSCACHE
-	if (PageFsCache(page)) {
+	if (folio_test_fscache(folio)) {
 		if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
 			return 0;
-		wait_on_page_fscache(page);
+		folio_wait_fscache(folio);
 	}
 #endif
 	return 1;
@@ -152,55 +156,58 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 static void v9fs_invalidate_page(struct page *page, unsigned int offset,
 				 unsigned int length)
 {
-	wait_on_page_fscache(page);
+	struct folio *folio = page_folio(page);
+
+	folio_wait_fscache(folio);
 }
 
-static int v9fs_vfs_writepage_locked(struct page *page)
+static int v9fs_vfs_write_folio_locked(struct folio *folio)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio_inode(folio);
 	struct v9fs_inode *v9inode = V9FS_I(inode);
-	loff_t start = page_offset(page);
-	loff_t size = i_size_read(inode);
+	loff_t start = folio_pos(folio);
+	loff_t i_size = i_size_read(inode);
 	struct iov_iter from;
-	int err, len;
+	size_t len = folio_size(folio);
+	int err;
+
+	if (start >= i_size)
+		return 0; /* Simultaneous truncation occurred */
 
-	if (page->index == size >> PAGE_SHIFT)
-		len = size & ~PAGE_MASK;
-	else
-		len = PAGE_SIZE;
+	len = min_t(loff_t, i_size - start, len);
 
-	iov_iter_xarray(&from, WRITE, &page->mapping->i_pages, start, len);
+	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);
 
 	/* We should have writeback_fid always set */
 	BUG_ON(!v9inode->writeback_fid);
 
-	set_page_writeback(page);
+	folio_start_writeback(folio);
 
 	p9_client_write(v9inode->writeback_fid, start, &from, &err);
 
-	end_page_writeback(page);
+	folio_end_writeback(folio);
 	return err;
 }
 
 static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
 {
+	struct folio *folio = page_folio(page);
 	int retval;
 
-	p9_debug(P9_DEBUG_VFS, "page %p\n", page);
+	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
 
-	retval = v9fs_vfs_writepage_locked(page);
+	retval = v9fs_vfs_write_folio_locked(folio);
 	if (retval < 0) {
 		if (retval == -EAGAIN) {
-			redirty_page_for_writepage(wbc, page);
+			folio_redirty_for_writepage(wbc, folio);
 			retval = 0;
 		} else {
-			SetPageError(page);
-			mapping_set_error(page->mapping, retval);
+			mapping_set_error(folio_mapping(folio), retval);
 		}
 	} else
 		retval = 0;
 
-	unlock_page(page);
+	folio_unlock(folio);
 	return retval;
 }
 
@@ -213,14 +220,15 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
 
 static int v9fs_launder_page(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	int retval;
 
-	if (clear_page_dirty_for_io(page)) {
-		retval = v9fs_vfs_writepage_locked(page);
+	if (folio_clear_dirty_for_io(folio)) {
+		retval = v9fs_vfs_write_folio_locked(folio);
 		if (retval)
 			return retval;
 	}
 
-	wait_on_page_fscache(page);
+	folio_wait_fscache(folio);
 	return 0;
 }
 
@@ -265,10 +273,10 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
 			    loff_t pos, unsigned int len, unsigned int flags,
-			    struct page **pagep, void **fsdata)
+			    struct page **subpagep, void **fsdata)
 {
 	int retval;
-	struct page *page;
+	struct folio *folio;
 	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
 
 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
@@ -279,31 +287,32 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
 	 * file.  We need to do this before we get a lock on the page in case
 	 * there's more than one writer competing for the same cache block.
 	 */
-	retval = netfs_write_begin(filp, mapping, pos, len, flags, &page, fsdata,
+	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
 				   &v9fs_req_ops, NULL);
 	if (retval < 0)
 		return retval;
 
-	*pagep = find_subpage(page, pos / PAGE_SIZE);
+	*subpagep = &folio->page;
 	return retval;
 }
 
 static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 			  loff_t pos, unsigned int len, unsigned int copied,
-			  struct page *page, void *fsdata)
+			  struct page *subpage, void *fsdata)
 {
 	loff_t last_pos = pos + copied;
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(subpage);
+	struct inode *inode = mapping->host;
 
 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
 
-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		if (unlikely(copied < len)) {
 			copied = 0;
 			goto out;
 		}
 
-		SetPageUptodate(page);
+		folio_mark_uptodate(folio);
 	}
 
 	/*
@@ -314,10 +323,10 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 		inode_add_bytes(inode, last_pos - inode->i_size);
 		i_size_write(inode, last_pos);
 	}
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 out:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 
 	return copied;
 }
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 4244d48398ef..612e297f3763 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -528,13 +528,13 @@ static vm_fault_t
 v9fs_vm_page_mkwrite(struct vm_fault *vmf)
 {
 	struct v9fs_inode *v9inode;
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct file *filp = vmf->vma->vm_file;
 	struct inode *inode = file_inode(filp);
 
 
-	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
-		 page, (unsigned long)filp->private_data);
+	p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
+		 folio, (unsigned long)filp->private_data);
 
 	v9inode = V9FS_I(inode);
 
@@ -542,24 +542,24 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
 	 * be modified.  We then assume the entire page will need writing back.
 	 */
 #ifdef CONFIG_9P_FSCACHE
-	if (PageFsCache(page) &&
-	    wait_on_page_fscache_killable(page) < 0)
-		return VM_FAULT_RETRY;
+	if (folio_test_fscache(folio) &&
+	    folio_wait_fscache_killable(folio) < 0)
+		return VM_FAULT_NOPAGE;
 #endif
 
 	/* Update file times before taking page lock */
 	file_update_time(filp);
 
 	BUG_ON(!v9inode->writeback_fid);
-	if (lock_page_killable(page) < 0)
+	if (folio_lock_killable(folio) < 0)
 		return VM_FAULT_RETRY;
-	if (page->mapping != inode->i_mapping)
+	if (folio_mapping(folio) != inode->i_mapping)
 		goto out_unlock;
-	wait_for_stable_page(page);
+	folio_wait_stable(folio);
 
 	return VM_FAULT_LOCKED;
 out_unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 	return VM_FAULT_NOPAGE;
 }
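
A note on v9fs_vfs_write_folio_locked() above: the old PAGE_SHIFT/PAGE_MASK
arithmetic only computed a correct write length for a single PAGE_SIZE page,
whereas the new clamp handles any folio size plus the simultaneous-truncation
case.  A standalone sketch of the arithmetic with illustrative values (plain
userspace C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		long long i_size = 70000;	/* assumed file size */
		long long start  = 65536;	/* folio_pos(): byte offset of the folio */
		long long len    = 16384;	/* folio_size(): e.g. a 4-page folio */

		if (start >= i_size)
			return 0;	/* simultaneous truncation: nothing to write */

		if (i_size - start < len)	/* min_t(loff_t, i_size - start, len) */
			len = i_size - start;

		printf("write %lld bytes at offset %lld\n", len, start); /* 4464 at 65536 */
		return 0;
	}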