author		Vlastimil Babka <vbabka@suse.cz>	2021-10-26 16:35:02 +0200
committer	Vlastimil Babka <vbabka@suse.cz>	2022-01-06 12:26:01 +0100
commit		45387b8c14143623dfe905b5260836f9d62e1371
tree		3be8a7b7ab46f64f54abfdef833488defa4f8537 /mm/slub.c
parent		mm/slub: Convert print_page_info() to print_slab_info()
mm/slub: Convert alloc_slab_page() to return a struct slab
Preparatory; callers convert back to struct page for now.
Also move setting the page flags to alloc_slab_page(), where we still
operate on a struct page. This means the page->slab_cache pointer is now
set later than the PageSlab flag, which could in theory confuse a pfn
walker that assumes PageSlab implies a valid slab_cache pointer. But
since the code had no barriers and used the non-atomic __set_bit()
anyway, the reordering could have happened before this patch too, so no
such walker should exist.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
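
The ordering concern discussed above can be modeled in a few lines of
plain C. This is a standalone userspace sketch, not kernel code: the
names fake_page, PG_SLAB, publish() and walk() are invented for
illustration and only mirror PageSlab and page->slab_cache.

#include <stdio.h>

struct fake_page {
	unsigned long flags;	/* bit 0 stands in for PG_slab */
	void *slab_cache;	/* stands in for page->slab_cache */
};

#define PG_SLAB (1UL << 0)

/* Publisher side: after this patch the flag is set first ... */
static void publish(struct fake_page *page, void *cache)
{
	page->flags |= PG_SLAB;		/* like __folio_set_slab(): plain store, no barrier */
	page->slab_cache = cache;	/* ... and the cache pointer only later */
}

/*
 * A hypothetical pfn walker: with no barriers on either side, it could
 * observe PG_SLAB while slab_cache is still unset -- which is why the
 * commit message argues that no walker may rely on that ordering.
 */
static void walk(const struct fake_page *page)
{
	if (page->flags & PG_SLAB)
		printf("slab page, cache=%p (may still be NULL mid-publish)\n",
		       page->slab_cache);
}

int main(void)
{
	struct fake_page page = { 0, NULL };
	int cache;

	publish(&page, &cache);
	walk(&page);
	return 0;
}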
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	26
1 file changed, 16 insertions, 10 deletions
diff --git a/mm/slub.c b/mm/slub.c
index d3e9e322b4e2..65cbdeae7edb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1788,18 +1788,27 @@ static void *setup_object(struct kmem_cache *s, struct page *page,
 /*
  * Slab allocation and freeing
  */
-static inline struct page *alloc_slab_page(struct kmem_cache *s,
+static inline struct slab *alloc_slab_page(struct kmem_cache *s,
 		gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
-		page = alloc_pages(flags, order);
+		folio = (struct folio *)alloc_pages(flags, order);
 	else
-		page = __alloc_pages_node(node, flags, order);
+		folio = (struct folio *)__alloc_pages_node(node, flags, order);
 
-	return page;
+	if (!folio)
+		return NULL;
+
+	slab = folio_slab(folio);
+	__folio_set_slab(folio);
+	if (page_is_pfmemalloc(folio_page(folio, 0)))
+		slab_set_pfmemalloc(slab);
+
+	return slab;
 }
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -1932,7 +1941,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	page = alloc_slab_page(s, alloc_gfp, node, oo);
+	page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
 	if (unlikely(!page)) {
 		oo = s->min;
 		alloc_gfp = flags;
@@ -1940,7 +1949,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		page = alloc_slab_page(s, alloc_gfp, node, oo);
+		page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
 		if (unlikely(!page))
 			goto out;
 		stat(s, ORDER_FALLBACK);
@@ -1951,9 +1960,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	account_slab(page_slab(page), oo_order(oo), s, flags);
 
 	page->slab_cache = s;
-	__SetPageSlab(page);
-	if (page_is_pfmemalloc(page))
-		SetPageSlabPfmemalloc(page);
 
 	kasan_poison_slab(page);
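
One detail worth noting in the callers above: slab_page() is applied to
the return value before the NULL check. That works because in this
series struct slab is a typed overlay of struct page (and struct
folio), so the conversion helpers amount to offset-zero casts and a
NULL slab converts to a NULL page. Below is a minimal standalone sketch
of that relationship, with simplified stand-in types rather than the
real definitions from mm/slab.h and include/linux/mm_types.h.

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins; the real types carry many more fields plus
 * compile-time layout asserts. */
struct page  { unsigned long flags; void *slab_cache; };
struct folio { struct page page; };	/* folio wraps its head page */
struct slab  { struct page page; };	/* slab overlays the same memory */

/* Mirrors of the kernel's folio_slab()/slab_page() helpers, reduced to
 * the casts they boil down to: no dereference, so NULL stays NULL. */
static struct slab *folio_slab(struct folio *folio)
{
	return (struct slab *)folio;
}

static struct page *slab_page(struct slab *slab)
{
	return (struct page *)slab;
}

int main(void)
{
	struct folio f = { { 0, NULL } };

	/* All three views name the same address. */
	assert((void *)folio_slab(&f) == (void *)&f);
	assert(slab_page(folio_slab(&f)) == &f.page);

	/* A failed allocation propagates: slab_page(NULL) is NULL,
	 * which keeps the callers' !page checks working. */
	assert(slab_page(NULL) == NULL);
	return 0;
}

The same offset-zero property is what makes the (struct folio *) casts
on the alloc_pages() return values in the first hunk legitimate.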