author     Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 21:37:02 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 21:37:02 +0100
commit     6020c204be997e3f5129839ff9c801800fb4336e
tree       2125670ece9bed7951946b8badd6f40cf5963570 /lib
parent     Merge tag 'spdx-5.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gr...
parent     mm: Use multi-index entries in the page cache
Merge tag 'folio-5.17' of git://git.infradead.org/users/willy/pagecache
Pull folio conversion updates from Matthew Wilcox:
 "Convert much of the page cache to use folios.

  This stops just short of actually enabling large folios. It converts
  everything that I noticed needs to be converted, but there may still
  be places I've overlooked which still have page size assumptions.

  The big change here is using large entries in the page cache XArray
  instead of many small entries. That only affects shmem for now, but
  it's a pretty big change for shmem since it changes where memory
  needs to be allocated (at split time instead of insertion)"

* tag 'folio-5.17' of git://git.infradead.org/users/willy/pagecache: (49 commits)
  mm: Use multi-index entries in the page cache
  XArray: Add xas_advance()
  truncate,shmem: Handle truncates that split large folios
  truncate: Convert invalidate_inode_pages2_range to folios
  fs: Convert vfs_dedupe_file_range_compare to folios
  mm: Remove pagevec_remove_exceptionals()
  mm: Convert find_lock_entries() to use a folio_batch
  filemap: Return only folios from find_get_entries()
  filemap: Convert filemap_get_read_batch() to use a folio_batch
  filemap: Convert filemap_read() to use a folio
  truncate: Add invalidate_complete_folio2()
  truncate: Convert invalidate_inode_pages2_range() to use a folio
  truncate: Skip known-truncated indices
  truncate,shmem: Add truncate_inode_folio()
  shmem: Convert part of shmem_undo_range() to use a folio
  mm: Add unmap_mapping_folio()
  truncate: Add truncate_cleanup_folio()
  filemap: Add filemap_release_folio()
  filemap: Use a folio in filemap_page_mkwrite
  filemap: Use a folio in filemap_map_pages
  ...
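The "large entries" mentioned above are multi-index XArray entries: with CONFIG_XARRAY_MULTI, a single entry of order N covers 2^N consecutive indices, so a lookup at any covered index returns the same entry. A minimal sketch of the idea using the existing multi-order XArray API (XA_STATE_ORDER, xas_store, xas_nomem and xa_load are the real API; demo_xa and demo_store_multi are illustrative names only):

	#include <linux/xarray.h>

	static DEFINE_XARRAY(demo_xa);

	/*
	 * Store one order-2 entry covering indices 64..67
	 * (requires CONFIG_XARRAY_MULTI).
	 */
	static void demo_store_multi(void *entry)
	{
		XA_STATE_ORDER(xas, &demo_xa, 64, 2);

		do {
			xas_lock(&xas);
			xas_store(&xas, entry);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		/* Any index in 64..67 resolves to the same entry. */
		WARN_ON(xa_load(&demo_xa, 65) != entry);
	}

This is why shmem's allocation point moves: one large folio is inserted as one entry, and the per-page entries (and their tree nodes) only need to be allocated when the folio is split.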
Diffstat (limited to 'lib')
-rw-r--r--  lib/iov_iter.c  30
-rw-r--r--  lib/xarray.c     6
2 files changed, 17 insertions(+), 19 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 66a740e6e153..b0e0acdf96c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -69,42 +69,40 @@
#define iterate_xarray(i, n, base, len, __off, STEP) { \
__label__ __out; \
size_t __off = 0; \
- struct page *head = NULL; \
+ struct folio *folio; \
loff_t start = i->xarray_start + i->iov_offset; \
- unsigned offset = start % PAGE_SIZE; \
pgoff_t index = start / PAGE_SIZE; \
- int j; \
- \
XA_STATE(xas, i->xarray, index); \
\
+ len = PAGE_SIZE - offset_in_page(start); \
rcu_read_lock(); \
- xas_for_each(&xas, head, ULONG_MAX) { \
+ xas_for_each(&xas, folio, ULONG_MAX) { \
unsigned left; \
- if (xas_retry(&xas, head)) \
+ size_t offset; \
+ if (xas_retry(&xas, folio)) \
continue; \
- if (WARN_ON(xa_is_value(head))) \
+ if (WARN_ON(xa_is_value(folio))) \
break; \
- if (WARN_ON(PageHuge(head))) \
+ if (WARN_ON(folio_test_hugetlb(folio))) \
break; \
- for (j = (head->index < index) ? index - head->index : 0; \
- j < thp_nr_pages(head); j++) { \
- void *kaddr = kmap_local_page(head + j); \
- base = kaddr + offset; \
- len = PAGE_SIZE - offset; \
+ offset = offset_in_folio(folio, start + __off); \
+ while (offset < folio_size(folio)) { \
+ base = kmap_local_folio(folio, offset); \
len = min(n, len); \
left = (STEP); \
- kunmap_local(kaddr); \
+ kunmap_local(base); \
len -= left; \
__off += len; \
n -= len; \
if (left || n == 0) \
goto __out; \
- offset = 0; \
+ offset += len; \
+ len = PAGE_SIZE; \
} \
} \
__out: \
rcu_read_unlock(); \
- i->iov_offset += __off; \
+ i->iov_offset += __off; \
n = __off; \
}
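The net effect of the hunk above: instead of mapping each page of a compound page with kmap_local_page(head + j), the loop now walks the folio by byte offset, and kmap_local_folio() maps just the page containing that offset. Read standalone, the walk looks roughly like this (a simplified sketch, not the macro itself; walk_folio and process_chunk are illustrative stand-ins for the macro body and its STEP argument):

	#include <linux/highmem.h>
	#include <linux/mm.h>

	/*
	 * Walk a folio one page-sized chunk at a time, as iterate_xarray
	 * now does: kmap_local_folio() maps only the page containing
	 * @offset, so the mapping must be refreshed each PAGE_SIZE step.
	 */
	static size_t walk_folio(struct folio *folio, size_t offset, size_t n,
				 size_t (*process_chunk)(void *base, size_t len))
	{
		size_t done = 0;

		while (offset < folio_size(folio) && n) {
			/* Bytes left in the page containing @offset. */
			size_t len = min_t(size_t, n,
					   PAGE_SIZE - offset_in_page(offset));
			void *base = kmap_local_folio(folio, offset);
			size_t left = process_chunk(base, len);

			kunmap_local(base);
			len -= left;
			done += len;
			n -= len;
			offset += len;
			if (left)	/* short copy: stop, like goto __out */
				break;
		}
		return done;
	}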
diff --git a/lib/xarray.c b/lib/xarray.c
index f5d8f54907b4..6f47f6375808 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -157,7 +157,7 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
xas->xa_index += offset << shift;
}
-static void xas_advance(struct xa_state *xas)
+static void xas_next_offset(struct xa_state *xas)
{
xas->xa_offset++;
xas_move_index(xas, xas->xa_offset);
@@ -1250,7 +1250,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
}
- xas_advance(xas);
+ xas_next_offset(xas);
while (xas->xa_node && (xas->xa_index <= max)) {
if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
@@ -1268,7 +1268,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
if (entry && !xa_is_sibling(entry))
return entry;
- xas_advance(xas);
+ xas_next_offset(xas);
}
if (!xas->xa_node)
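The rename above frees the xas_advance name: the "XArray: Add xas_advance()" commit in this merge adds a new public helper of that name which moves the cursor to a given index, letting callers skip the remaining indices of a multi-index entry. A sketch of the call pattern, modelled on the page-cache batch lookups (count_folios is a hypothetical function; xas_load, xas_next, xas_retry, folio_nr_pages and the new xas_advance(&xas, index) are the real interfaces, assuming that two-argument signature):

	#include <linux/pagemap.h>
	#include <linux/xarray.h>

	/*
	 * Count folios in a mapping, visiting each large folio once:
	 * xas_advance() jumps to the folio's last index, so xas_next()
	 * steps past the multi-index entry instead of revisiting it.
	 */
	static unsigned long count_folios(struct address_space *mapping,
					  pgoff_t start)
	{
		XA_STATE(xas, &mapping->i_pages, start);
		struct folio *folio;
		unsigned long count = 0;

		rcu_read_lock();
		for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))	/* shadow entry, not a folio */
				continue;
			count++;
			xas_advance(&xas, folio->index +
					  folio_nr_pages(folio) - 1);
		}
		rcu_read_unlock();
		return count;
	}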