author		Matthew Wilcox <willy@infradead.org>	2018-05-17 06:08:30 +0200
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 16:46:35 +0200
commit		070e807c690bf9a648d4a878f3c68ea9f5f5ce14 (patch)
tree		8619c4d10c81696f4f293145a933284215b1577d
parent		page cache: Convert find_get_entries_tag to XArray (diff)
page cache: Convert filemap_map_pages to XArray
Slight change of strategy here; if we have trouble getting hold of a page for whatever reason (e.g. a compound page is split underneath us), don't spin to stabilise the page, just continue the iteration, like we would if we failed to trylock the page. Since this is a speculative optimisation, it feels like we should allow the process to take an extra fault if it turns out to need this page instead of spending time to pin down a page it may not need.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
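[ The strategy change in miniature, taken from the hunks below: where the radix tree version spun on a page that was split or moved underneath us,

	/* old: drop the reference and retry until the slot stabilises */
	if (compound_head(page) != head) {
		put_page(head);
		goto repeat;
	}

the XArray version gives up on that page and continues the iteration, leaving the process to take an ordinary fault later if it actually needs it:

	/* new: skip this page and carry on with the next index */
	if (compound_head(page) != head)
		goto skip;

The pre-existing skip label, outside this patch's context lines, drops the page reference before falling through to next. ]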
-rw-r--r--	mm/filemap.c | 42 +++++++++++++-----------------------------
1 file changed, 13 insertions(+), 29 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 35ae011971e1..ae1fcaa24f97 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2516,45 +2516,31 @@ EXPORT_SYMBOL(filemap_fault);
 void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
-	struct radix_tree_iter iter;
-	void **slot;
 	struct file *file = vmf->vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long max_idx;
+	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct page *head, *page;
 
 	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start_pgoff) {
-		if (iter.index > end_pgoff)
-			break;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
-			goto next;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
+	xas_for_each(&xas, page, end_pgoff) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (xa_is_value(page))
 			goto next;
-		}
 
 		head = compound_head(page);
 		if (!page_cache_get_speculative(head))
-			goto repeat;
+			goto next;
 
 		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+		if (compound_head(page) != head)
+			goto skip;
 
 		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto skip;
 
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
@@ -2573,10 +2559,10 @@ repeat:
 		if (file->f_ra.mmap_miss > 0)
 			file->f_ra.mmap_miss--;
 
-		vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
+		vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
 		if (vmf->pte)
-			vmf->pte += iter.index - last_pgoff;
-		last_pgoff = iter.index;
+			vmf->pte += xas.xa_index - last_pgoff;
+		last_pgoff = xas.xa_index;
 		if (alloc_set_pte(vmf, NULL, page))
 			goto unlock;
 		unlock_page(page);
@@ -2589,8 +2575,6 @@ next:
 		/* Huge page is mapped? No need to proceed. */
 		if (pmd_trans_huge(*vmf->pmd))
 			break;
-		if (iter.index == end_pgoff)
-			break;
 	}
 	rcu_read_unlock();
 }
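[ For reference, a sketch of how the converted loop reads with the three hunks applied, assembled from the new-side lines above; the unchanged middle of the loop (the uptodate/readahead checks through the mmap_miss accounting) and the unlock/skip labels are elided with ellipses rather than reproduced here. Note that xas_for_each() itself terminates at end_pgoff, which is why the old trailing bounds check could be deleted. ]

	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	struct page *head, *page;

	rcu_read_lock();
	xas_for_each(&xas, page, end_pgoff) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			goto next;

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto next;

		/* The page was split under us? */
		if (compound_head(page) != head)
			goto skip;

		/* Has the page moved? */
		if (unlikely(page != xas_reload(&xas)))
			goto skip;
		...
		vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		if (vmf->pte)
			vmf->pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;
		...
next:
		/* Huge page is mapped? No need to proceed. */
		if (pmd_trans_huge(*vmf->pmd))
			break;
	}
	rcu_read_unlock();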