path: root/mm/migrate.c
author    Yang Shi <shy828301@gmail.com>  2020-12-15 04:13:09 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 21:13:45 +0100
commit    c77c5cbafe549eb330e8909861a3e16cbda2c848 (patch)
tree      33783592b24532cdb051af449046c1f3cd069ca8 /mm/migrate.c
parent    mm: migrate: simplify the logic for handling permanent failure (diff)
download  linux-c77c5cbafe549eb330e8909861a3e16cbda2c848.tar.xz
          linux-c77c5cbafe549eb330e8909861a3e16cbda2c848.zip
mm: migrate: skip shared exec THP for NUMA balancing
NUMA balancing skips shared exec base pages. Since CONFIG_READ_ONLY_THP_FOR_FS was introduced, there may now be shared exec THPs, so skip such THPs for NUMA balancing as well. Willy's regular filesystem THP support patches could create shared exec THPs even without that config.

In addition, page_is_file_lru() is used to tell whether a page is file cache, but it filters out shmem pages. Putting executables in shmem to gain performance from shmem-THP is a plausible use case, so it is worth skipping migration for that case too.

Link: https://lkml.kernel.org/r/20201113205359.556831-4-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
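For readers following the skip logic outside the kernel tree, here is a minimal, compilable userspace sketch of the decision the patch centralizes in is_shared_exec_page() (see the diff below). The struct, its field names, and the VM_EXEC value are illustrative stand-ins for the kernel helpers page_mapcount(), page_is_file_lru(), and vma_is_shmem(); they are not the real kernel definitions.

/*
 * Hedged sketch: models the "skip shared exec page" predicate from the
 * patch with stand-in types. Not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_EXEC 0x4          /* stand-in for the kernel's VM_EXEC vma flag */

struct page_info {           /* hypothetical stand-in for struct page state */
	int mapcount;        /* how many processes map the page */
	bool file_lru;       /* would be page_is_file_lru(): file-backed page cache */
	bool shmem;          /* would be vma_is_shmem(): backed by shmem/tmpfs */
};

/*
 * Mirrors the condition in the diff: skip NUMA-balancing migration when the
 * page is mapped by more than one process, is file cache or shmem, and sits
 * in an executable mapping.
 */
static bool is_shared_exec_page(unsigned long vm_flags, const struct page_info *p)
{
	return p->mapcount != 1 &&
	       (p->file_lru || p->shmem) &&
	       (vm_flags & VM_EXEC);
}

int main(void)
{
	struct page_info shared_exec_thp = { .mapcount = 3, .file_lru = true };
	struct page_info private_anon    = { .mapcount = 1 };

	printf("shared exec THP skipped: %d\n",
	       is_shared_exec_page(VM_EXEC, &shared_exec_thp));   /* prints 1 */
	printf("private anon page skipped: %d\n",
	       is_shared_exec_page(VM_EXEC, &private_anon));      /* prints 0 */
	return 0;
}

The point of the helper is that both migrate_misplaced_page() and migrate_misplaced_transhuge_page() can now apply the same test, instead of the base-page path open-coding a check that the THP path lacked.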
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 47aa50d263c2..a7ccda55ca82 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2071,6 +2071,17 @@ bool pmd_trans_migrating(pmd_t pmd)
return PageLocked(page);
}
+static inline bool is_shared_exec_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ if (page_mapcount(page) != 1 &&
+ (page_is_file_lru(page) || vma_is_shmem(vma)) &&
+ (vma->vm_flags & VM_EXEC))
+ return true;
+
+ return false;
+}
+
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
@@ -2088,8 +2099,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
* Don't migrate file pages that are mapped in multiple processes
* with execute permissions as they are probably shared libraries.
*/
- if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
- (vma->vm_flags & VM_EXEC))
+ if (is_shared_exec_page(vma, page))
goto out;
/*
@@ -2144,6 +2154,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
int page_lru = page_is_file_lru(page);
unsigned long start = address & HPAGE_PMD_MASK;
+ if (is_shared_exec_page(vma, page))
+ goto out;
+
new_page = alloc_pages_node(node,
(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
HPAGE_PMD_ORDER);
@@ -2255,6 +2268,7 @@ out_fail:
out_unlock:
unlock_page(page);
+out:
put_page(page);
return 0;
}