author		David Hildenbrand <david@redhat.com>	2024-06-20 23:29:35 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2024-07-04 04:30:26 +0200
commit		ee86814b0562f18255b55c5e6a01a022895994cf (patch)
tree		de40f5622bc4b5640f5e1a77335e7d62043e4775 /mm/migrate.c
parent		mm/migrate: make migrate_misplaced_folio() return 0 on success (diff)
mm/migrate: move NUMA hinting fault folio isolation + checks under PTL
Currently we always take a folio reference, even if migration will not be
tried or isolation fails, requiring us to grab+drop an additional reference.
Further, we end up calling folio_likely_mapped_shared() when the folio might
already have been unmapped: once we drop the PTL, that can easily happen. We
want to stop touching mapcounts and friends from such context, and only call
folio_likely_mapped_shared() while the folio is still mapped: mapcount
information is pretty much stale and unreliable otherwise.
So let's move checks into numamigrate_isolate_folio(), rename that
function to migrate_misplaced_folio_prepare(), and call that function from
callsites where we call migrate_misplaced_folio(), but still with the PTL
held.
We can now stop taking temporary folio references, and really only take a
reference if folio isolation succeeded. Doing the
folio_likely_mapped_shared() + folio isolation under PT lock is now
similar to how we handle MADV_PAGEOUT.
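In caller terms, the split works roughly as in the following sketch. This is
not code from this patch (the callsite changes in the NUMA hinting fault
handlers are not shown in this diff, which is limited to mm/migrate.c);
numa_migrate_sketch() is a hypothetical helper and the struct vm_fault
plumbing is only assumed context:

	/*
	 * Illustrative only: isolate under the PTL, migrate after
	 * dropping it.
	 */
	static int numa_migrate_sketch(struct vm_fault *vmf,
			struct folio *folio, int target_nid)
	{
		struct vm_area_struct *vma = vmf->vma;
		int err;

		/*
		 * PTL still held: the folio is still mapped, so the
		 * mapcount-based folio_likely_mapped_shared() check inside
		 * the prepare step is still meaningful. On success we own
		 * the reference that isolation took.
		 */
		err = migrate_misplaced_folio_prepare(folio, vma, target_nid);
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		if (err)
			return err;	/* isolation refused; no reference to drop */

		/*
		 * Migrate with the PTL dropped: this un-isolates the folio
		 * and drops the isolation reference whether or not the
		 * migration succeeds.
		 */
		return migrate_misplaced_folio(folio, vma, target_nid);
	}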
While at it, combine the folio_is_file_lru() checks.
[david@redhat.com: fix list_del() corruption]
Link: https://lkml.kernel.org/r/8f85c31a-e603-4578-bf49-136dae0d4b69@redhat.com
Link: https://lkml.kernel.org/r/20240626191129.658CFC32782@smtp.kernel.org
Link: https://lkml.kernel.org/r/20240620212935.656243-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Tested-by: Donet Tom <donettom@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	83
1 file changed, 39 insertions(+), 44 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 45e47125c975..e3eb22595ced 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2519,16 +2519,44 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
 	return __folio_alloc_node(gfp, order, nid);
 }
 
-static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
+/*
+ * Prepare for calling migrate_misplaced_folio() by isolating the folio if
+ * permitted. Must be called with the PTL still held.
+ */
+int migrate_misplaced_folio_prepare(struct folio *folio,
+		struct vm_area_struct *vma, int node)
 {
 	int nr_pages = folio_nr_pages(folio);
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	if (folio_is_file_lru(folio)) {
+		/*
+		 * Do not migrate file folios that are mapped in multiple
+		 * processes with execute permissions as they are probably
+		 * shared libraries.
+		 *
+		 * See folio_likely_mapped_shared() on possible imprecision
+		 * when we cannot easily detect if a folio is shared.
+		 */
+		if ((vma->vm_flags & VM_EXEC) &&
+		    folio_likely_mapped_shared(folio))
+			return -EACCES;
+
+		/*
+		 * Do not migrate dirty folios as not all filesystems can move
+		 * dirty folios in MIGRATE_ASYNC mode which is a waste of
+		 * cycles.
+		 */
+		if (folio_test_dirty(folio))
+			return -EAGAIN;
+	}
 
 	/* Avoid migrating to a node that is nearly full */
 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
 		int z;
 
 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
-			return 0;
+			return -EAGAIN;
 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
 			if (managed_zone(pgdat->node_zones + z))
 				break;
@@ -2539,65 +2567,37 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 		 * further.
 		 */
 		if (z < 0)
-			return 0;
+			return -EAGAIN;
 
 		wakeup_kswapd(pgdat->node_zones + z, 0,
 			      folio_order(folio), ZONE_MOVABLE);
-		return 0;
+		return -EAGAIN;
 	}
 
 	if (!folio_isolate_lru(folio))
-		return 0;
+		return -EAGAIN;
 
 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
 			    nr_pages);
-
-	/*
-	 * Isolating the folio has taken another reference, so the
-	 * caller's reference can be safely dropped without the folio
-	 * disappearing underneath us during migration.
-	 */
-	folio_put(folio);
-	return 1;
+	return 0;
 }
 
 /*
  * Attempt to migrate a misplaced folio to the specified destination
- * node. Caller is expected to have an elevated reference count on
- * the folio that will be dropped by this function before returning.
+ * node. Caller is expected to have isolated the folio by calling
+ * migrate_misplaced_folio_prepare(), which will result in an
+ * elevated reference count on the folio. This function will un-isolate the
+ * folio, dereferencing the folio before returning.
  */
 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
 			    int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
-	int isolated;
 	int nr_remaining;
 	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
 	int nr_pages = folio_nr_pages(folio);
 
-	/*
-	 * Don't migrate file folios that are mapped in multiple processes
-	 * with execute permissions as they are probably shared libraries.
-	 *
-	 * See folio_likely_mapped_shared() on possible imprecision when we
-	 * cannot easily detect if a folio is shared.
-	 */
-	if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
-	    (vma->vm_flags & VM_EXEC))
-		goto out;
-
-	/*
-	 * Also do not migrate dirty folios as not all filesystems can move
-	 * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
-	 */
-	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
-		goto out;
-
-	isolated = numamigrate_isolate_folio(pgdat, folio);
-	if (!isolated)
-		goto out;
-
 	list_add(&folio->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
 				     NULL, node, MIGRATE_ASYNC,
@@ -2609,7 +2609,6 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
 					folio_is_file_lru(folio), -nr_pages);
 			folio_putback_lru(folio);
 		}
-		isolated = 0;
 	}
 	if (nr_succeeded) {
 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
@@ -2618,11 +2617,7 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
 			nr_succeeded);
 	}
 	BUG_ON(!list_empty(&migratepages));
-	return isolated ? 0 : -EAGAIN;
-
-out:
-	folio_put(folio);
-	return -EAGAIN;
+	return nr_remaining ? -EAGAIN : 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */