From 4248d0083ec5817eebfb916c54950d100b3468ee Mon Sep 17 00:00:00 2001
From: Longlong Xia
Date: Fri, 14 Apr 2023 10:17:41 +0800
Subject: mm: ksm: support hwpoison for ksm page

hwpoison_user_mappings() is updated to support ksm pages, and
collect_procs_ksm() is added to collect processes when the error hits a
ksm page.  It differs from collect_procs_anon() in that it also needs
to traverse the rmap-item list on the stable node of the ksm page.  At
the same time, add_to_kill_ksm() is added to handle ksm pages, and
task_in_to_kill_list() is added to avoid adding tsk to the to_kill list
more than once: when scanning the list, if the pages that make up the
ksm page all come from the same process, the same task could otherwise
be added repeatedly.

Link: https://lkml.kernel.org/r/20230414021741.2597273-3-xialonglong1@huawei.com
Signed-off-by: Longlong Xia
Tested-by: Naoya Horiguchi
Reviewed-by: Kefeng Wang
Cc: Miaohe Lin
Cc: Nanyong Sun
Signed-off-by: Andrew Morton
---
 mm/ksm.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/mm/ksm.c b/mm/ksm.c
index 290a3eb6d8de..37c63310bc4e 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2738,6 +2738,51 @@ again:
 	goto again;
 }
 
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Collect processes when the error hit an ksm page.
+ */
+void collect_procs_ksm(struct page *page, struct list_head *to_kill,
+		       int force_early)
+{
+	struct ksm_stable_node *stable_node;
+	struct ksm_rmap_item *rmap_item;
+	struct folio *folio = page_folio(page);
+	struct vm_area_struct *vma;
+	struct task_struct *tsk;
+
+	stable_node = folio_stable_node(folio);
+	if (!stable_node)
+		return;
+	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
+		struct anon_vma *av = rmap_item->anon_vma;
+
+		anon_vma_lock_read(av);
+		read_lock(&tasklist_lock);
+		for_each_process(tsk) {
+			struct anon_vma_chain *vmac;
+			unsigned long addr;
+			struct task_struct *t =
+				task_early_kill(tsk, force_early);
+			if (!t)
+				continue;
+			anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
+						       ULONG_MAX)
+			{
+				vma = vmac->vma;
+				if (vma->vm_mm == t->mm) {
+					addr = rmap_item->address & PAGE_MASK;
+					add_to_kill_ksm(t, page, vma, to_kill,
+							addr);
+				}
+			}
+		}
+		read_unlock(&tasklist_lock);
+		anon_vma_unlock_read(av);
+	}
+}
+#endif
+
 #ifdef CONFIG_MIGRATION
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
 {
-- 
cgit v1.2.3
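
Note: the hwpoison_user_mappings() side of this change lives in
mm/memory-failure.c and is not part of the hunk above. Based on the
commit message, the caller plausibly routes ksm pages to the new
collector roughly as sketched below; the exact shape of collect_procs()
here is an assumption modeled on the existing anon/file dispatch, not
something shown by this patch:

/* Sketch (assumed): collect_procs() in mm/memory-failure.c dispatches
 * a poisoned ksm page to collect_procs_ksm() ahead of the existing
 * anon and file cases. */
static void collect_procs(struct page *page, struct list_head *tokill,
			  int force_early)
{
	if (!page->mapping)
		return;
	if (PageKsm(page))
		collect_procs_ksm(page, tokill, force_early);
	else if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}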
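
Likewise, add_to_kill_ksm() and task_in_to_kill_list() are defined in
mm/memory-failure.c rather than here. A minimal sketch of what the
commit message describes, assuming the existing __add_to_kill() helper
and a struct to_kill carrying a list node (nd) and the target task
(tsk) in that file:

/* Sketch (assumed): return true if tsk is already queued on to_kill.
 * Every rmap_item of a ksm page may resolve to the same process, which
 * would otherwise be queued once per duplicate page. */
static bool task_in_to_kill_list(struct list_head *to_kill,
				 struct task_struct *tsk)
{
	struct to_kill *tk;

	list_for_each_entry(tk, to_kill, nd)
		if (tk->tsk == tsk)
			return true;
	return false;
}

/* Sketch (assumed): queue tsk for killing at the ksm virtual address
 * unless it is already on the list. */
void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
		     struct vm_area_struct *vma,
		     struct list_head *to_kill, unsigned long ksm_addr)
{
	if (!task_in_to_kill_list(to_kill, tsk))
		__add_to_kill(tsk, p, vma, to_kill, ksm_addr);
}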