author | Matthew Wilcox <willy@infradead.org> | 2017-11-22 14:37:38 +0100
committer | Matthew Wilcox <willy@infradead.org> | 2018-10-21 16:46:41 +0200
commit | ef3038a573aa8bf2f3797b110f7244b55a0e519c
tree | c57863631710a2f8cd9147fd43406d35b447b7a9 /mm
parent | memfd: Convert memfd_wait_for_pins to XArray
memfd: Convert memfd_tag_pins to XArray
Switch to a batch-processing model like memfd_wait_for_pins() and
use the xa_state previously set up by memfd_wait_for_pins().
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
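
The batch-processing model referred to above walks the page cache through an xa_state, marks every page whose reference count exceeds its map count by more than one with MEMFD_TAG_PINNED, and, every XA_CHECK_SCHED entries, pauses the iterator, drops the lock and reschedules. The annotated sketch below restates the new memfd_tag_pins() from the diff that follows; the includes and comments are explanatory additions, not part of the patch.

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/xarray.h>

#define MEMFD_TAG_PINNED	PAGECACHE_TAG_TOWRITE

/* Annotated restatement of the new memfd_tag_pins(); comments added here. */
static void memfd_tag_pins(struct xa_state *xas)
{
	struct page *page;
	unsigned int tagged = 0;

	/* Flush the per-CPU LRU caches so transient references are dropped. */
	lru_add_drain();

	xas_lock_irq(xas);
	xas_for_each(xas, page, ULONG_MAX) {
		/* Swap/shadow entries are value entries, not pages; skip them. */
		if (xa_is_value(page))
			continue;
		/* References beyond the page mappings mean the page is pinned. */
		if (page_count(page) - page_mapcount(page) > 1)
			xas_set_mark(xas, MEMFD_TAG_PINNED);

		/* Batch boundary: every XA_CHECK_SCHED entries, let others run. */
		if (++tagged % XA_CHECK_SCHED)
			continue;

		xas_pause(xas);		/* record the position so the walk can resume */
		xas_unlock_irq(xas);
		cond_resched();
		xas_lock_irq(xas);
	}
	xas_unlock_irq(xas);
}
```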
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memfd.c | 44 |
1 file changed, 18 insertions, 26 deletions
diff --git a/mm/memfd.c b/mm/memfd.c
index cf0ddd5fc859..97264c79d2cd 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -28,37 +28,29 @@
 #define MEMFD_TAG_PINNED        PAGECACHE_TAG_TOWRITE
 #define LAST_SCAN               4       /* about 150ms max */
 
-static void memfd_tag_pins(struct address_space *mapping)
+static void memfd_tag_pins(struct xa_state *xas)
 {
-	struct radix_tree_iter iter;
-	void __rcu **slot;
-	pgoff_t start;
 	struct page *page;
+	unsigned int tagged = 0;
 
 	lru_add_drain();
-	start = 0;
-	rcu_read_lock();
-
-	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
-		page = radix_tree_deref_slot(slot);
-		if (!page || radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-		} else if (page_count(page) - page_mapcount(page) > 1) {
-			xa_lock_irq(&mapping->i_pages);
-			radix_tree_tag_set(&mapping->i_pages, iter.index,
-					   MEMFD_TAG_PINNED);
-			xa_unlock_irq(&mapping->i_pages);
-		}
 
-		if (need_resched()) {
-			slot = radix_tree_iter_resume(slot, &iter);
-			cond_resched_rcu();
-		}
+	xas_lock_irq(xas);
+	xas_for_each(xas, page, ULONG_MAX) {
+		if (xa_is_value(page))
+			continue;
+		if (page_count(page) - page_mapcount(page) > 1)
+			xas_set_mark(xas, MEMFD_TAG_PINNED);
+
+		if (++tagged % XA_CHECK_SCHED)
+			continue;
+
+		xas_pause(xas);
+		xas_unlock_irq(xas);
+		cond_resched();
+		xas_lock_irq(xas);
 	}
-	rcu_read_unlock();
+	xas_unlock_irq(xas);
 }
 
 /*
@@ -76,7 +68,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 	struct page *page;
 	int error, scan;
 
-	memfd_tag_pins(mapping);
+	memfd_tag_pins(&xas);
 
 	error = 0;
 	for (scan = 0; scan <= LAST_SCAN; scan++) {
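
For context, memfd_wait_for_pins() (converted to the XArray in the parent commit) then rescans only the entries carrying MEMFD_TAG_PINNED and clears the mark once the extra references are gone. The sketch below is only an illustration of that second pass using xas_for_each_marked(); the helper name is invented, and the real function's retry loop, LAST_SCAN back-off, periodic rescheduling and -EBUSY handling are omitted.

```c
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

#define MEMFD_TAG_PINNED	PAGECACHE_TAG_TOWRITE

/*
 * Hypothetical helper, for illustration only: walk the entries that
 * memfd_tag_pins() marked and report how many are still pinned,
 * clearing the mark on those that have been released.
 */
static int memfd_check_pins(struct xa_state *xas)
{
	struct page *page;
	int pinned = 0;

	xas_set(xas, 0);	/* restart the shared iterator at index 0 */
	xas_lock_irq(xas);
	xas_for_each_marked(xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
		if (page_count(page) - page_mapcount(page) > 1)
			pinned++;	/* still pinned; keep the mark for the next scan */
		else
			xas_clear_mark(xas, MEMFD_TAG_PINNED);
	}
	xas_unlock_irq(xas);

	return pinned;
}
```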