author		Mike Rapoport (IBM) <rppt@kernel.org>	2023-03-21 18:05:01 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-04-06 04:42:52 +0200
commit		fce0b4213edb960859dcc65ea414c8efb11948e1 (patch)
tree		dba1b8507e6388651e617887388def25ed5256ee /mm/page_alloc.c
parent		mips: fix comment about pgtable_init() (diff)
mm/page_alloc: add helper for checking if check_pages_enabled
Instead of duplicating the long static_branch_unlikely(&check_pages_enabled) test, wrap it in a helper function, is_check_pages_enabled().

Link: https://lkml.kernel.org/r/20230321170513.2401534-3-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
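For orientation only (not part of the patch), below is a minimal userspace C sketch of the pattern the commit introduces: the repeated enable-check is hidden behind a small inline helper so call sites read as a plain predicate. The plain bool stands in for the kernel's static key (static_branch_unlikely patches the branch at runtime, so the check is effectively free when disabled), and free_page_is_bad() here is a hypothetical stub, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's static key; in the kernel this is
 * DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled). */
static bool check_pages_enabled;

/* Helper mirroring the one added by this patch. */
static inline bool is_check_pages_enabled(void)
{
	return check_pages_enabled;
}

/* Hypothetical stand-in for the real free_page_is_bad() check. */
static bool free_page_is_bad(long page)
{
	return page < 0;
}

int main(void)
{
	check_pages_enabled = true;	/* e.g. enabled via CONFIG_DEBUG_VM */

	/* Call sites stay short instead of spelling out
	 * static_branch_unlikely(&check_pages_enabled) every time. */
	if (is_check_pages_enabled() && free_page_is_bad(-1))
		puts("bad page detected");

	return 0;
}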
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4b09711b6f0f..33925488040f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -245,6 +245,11 @@ EXPORT_SYMBOL(init_on_free);
 /* perform sanity checks on struct pages being allocated or freed */
 static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+static inline bool is_check_pages_enabled(void)
+{
+	return static_branch_unlikely(&check_pages_enabled);
+}
+
 static bool _init_on_alloc_enabled_early __read_mostly
 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -1450,7 +1455,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		for (i = 1; i < (1 << order); i++) {
 			if (compound)
 				bad += free_tail_pages_check(page, page + i);
-			if (static_branch_unlikely(&check_pages_enabled)) {
+			if (is_check_pages_enabled()) {
 				if (unlikely(free_page_is_bad(page + i))) {
 					bad++;
 					continue;
@@ -1463,7 +1468,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	page->mapping = NULL;
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		if (free_page_is_bad(page))
 			bad++;
 		if (bad)
@@ -2373,7 +2378,7 @@ static int check_new_page(struct page *page)
 static inline bool check_new_pages(struct page *page, unsigned int order)
 {
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		for (int i = 0; i < (1 << order); i++) {
 			struct page *p = page + i;