path: root/mm
author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-21 15:24:39 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-26 05:55:59 +0200
commit		b7b098cf00a2b65d5654a86dc8edf82f125289c1 (patch)
tree		fb38a5cbb30ce4eb6ab9f8abaf3d2b280ab01e2c /mm
parent		mm/slub: avoid recursive loop with kmemleak (diff)
mm: always initialise folio->_deferred_list
Patch series "Various significant MM patches". These patches all interact in annoying ways which make it tricky to send them out in any way other than a big batch, even though there's not really an overarching theme to connect them. The big effects of this patch series are: - folio_test_hugetlb() becomes reliable, even when called without a page reference - We free up PG_slab, and we could always use more page flags - We no longer need to check PageSlab before calling page_mapcount() This patch (of 9): For compound pages which are at least order-2 (and hence have a deferred_list), initialise it and then we can check at free that the page is not part of a deferred list. We recently found this useful to rule out a source of corruption. [peterx@redhat.com: always initialise folio->_deferred_list] Link: https://lkml.kernel.org/r/20240417211836.2742593-2-peterx@redhat.com Link: https://lkml.kernel.org/r/20240321142448.1645400-1-willy@infradead.org Link: https://lkml.kernel.org/r/20240321142448.1645400-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Peter Xu <peterx@redhat.com> Reviewed-by: David Hildenbrand <david@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Muchun Song <muchun.song@linux.dev> Cc: Oscar Salvador <osalvador@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	2
-rw-r--r--	mm/hugetlb.c	3
-rw-r--r--	mm/internal.h	2
-rw-r--r--	mm/memcontrol.c	3
-rw-r--r--	mm/page_alloc.c	9
5 files changed, 12 insertions, 7 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 25191ab70631..4cc7133aaa4b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -793,8 +793,6 @@ void folio_prep_large_rmappable(struct folio *folio)
{
if (!folio || !folio_test_large(folio))
return;
- if (folio_order(folio) > 1)
- INIT_LIST_HEAD(&folio->_deferred_list);
folio_set_large_rmappable(folio);
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ce7be5c24442..378181547b7b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1796,7 +1796,8 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
destroy_compound_gigantic_folio(folio, huge_page_order(h));
free_gigantic_folio(folio, huge_page_order(h));
} else {
- __free_pages(&folio->page, huge_page_order(h));
+ INIT_LIST_HEAD(&folio->_deferred_list);
+ folio_put(folio);
}
}
diff --git a/mm/internal.h b/mm/internal.h
index 85c3db43454d..5c0c57c9cd19 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -525,6 +525,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
atomic_set(&folio->_entire_mapcount, -1);
atomic_set(&folio->_nr_pages_mapped, 0);
atomic_set(&folio->_pincount, 0);
+ if (order > 1)
+ INIT_LIST_HEAD(&folio->_deferred_list);
}
static inline void prep_compound_tail(struct page *head, int tail_idx)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 896b4bf05b9c..45dd20901282 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7400,6 +7400,9 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
struct obj_cgroup *objcg;
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+ VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
+ !folio_test_hugetlb(folio) &&
+ !list_empty(&folio->_deferred_list), folio);
/*
* Nobody should be changing or seriously looking at
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e1241ecef271..7e8f4b751801 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1007,10 +1007,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
}
break;
case 2:
- /*
- * the second tail page: ->mapping is
- * deferred_list.next -- ignore value.
- */
+ /* the second tail page: deferred_list overlaps ->mapping */
+ if (unlikely(!list_empty(&folio->_deferred_list))) {
+ bad_page(page, "on deferred list");
+ goto out;
+ }
break;
default:
if (page->mapping != TAIL_MAPPING) {