diff options
author | Vlastimil Babka <vbabka@suse.cz> | 2023-04-05 16:28:40 +0200 |
---|---|---|
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-04-19 01:29:54 +0200 |
commit | 8666925c498674426de44ecba79fd8bf42d3cda3 (patch) | |
tree | e6d624573dabf8cef00713fa4b6086065f84ccdd /mm/page_alloc.c | |
parent | mm/userfaultfd: don't consider uffd-wp bit of writable migration entries (diff) | |
download | linux-8666925c498674426de44ecba79fd8bf42d3cda3.tar.xz linux-8666925c498674426de44ecba79fd8bf42d3cda3.zip |
mm, page_alloc: use check_pages_enabled static key to check tail pages
Commit 700d2e9a36b9 ("mm, page_alloc: reduce page alloc/free sanity
checks") has introduced a new static key check_pages_enabled to control
when struct pages are sanity checked during allocation and freeing. Mel
Gorman suggested that free_tail_pages_check() could use this static key as
well, instead of relying on CONFIG_DEBUG_VM. That makes sense, so do
that. Also rename the function to free_tail_page_prepare() because it
works on a single tail page and has a struct page preparation component as
well as the optional checking component.
Also remove some unnecessary unlikely() within static_branch_unlikely()
statements that Mel pointed out for commit 700d2e9a36b9.
Link: https://lkml.kernel.org/r/20230405142840.11068-1-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Suggested-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Alexander Halbuer <halbuer@sra.uni-hannover.de>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9c325e5e6b15..6da423ec356f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1131,7 +1131,7 @@ static inline bool free_page_is_bad(struct page *page) return true; } -static int free_tail_pages_check(struct page *head_page, struct page *page) +static int free_tail_page_prepare(struct page *head_page, struct page *page) { struct folio *folio = (struct folio *)head_page; int ret = 1; @@ -1142,7 +1142,7 @@ static int free_tail_pages_check(struct page *head_page, struct page *page) */ BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); - if (!IS_ENABLED(CONFIG_DEBUG_VM)) { + if (!static_branch_unlikely(&check_pages_enabled)) { ret = 0; goto out; } @@ -1276,9 +1276,9 @@ static __always_inline bool free_pages_prepare(struct page *page, ClearPageHasHWPoisoned(page); for (i = 1; i < (1 << order); i++) { if (compound) - bad += free_tail_pages_check(page, page + i); + bad += free_tail_page_prepare(page, page + i); if (is_check_pages_enabled()) { - if (unlikely(free_page_is_bad(page + i))) { + if (free_page_is_bad(page + i)) { bad++; continue; } @@ -1627,7 +1627,7 @@ static inline bool check_new_pages(struct page *page, unsigned int order) for (int i = 0; i < (1 << order); i++) { struct page *p = page + i; - if (unlikely(check_new_page(p))) + if (check_new_page(p)) return true; } } |