author     Matthew Wilcox (Oracle) <willy@infradead.org>   2021-10-04 15:45:59 +0200
committer  Vlastimil Babka <vbabka@suse.cz>                2022-01-06 12:25:57 +0100
commit     d835eef4fc26241258c524cb32b91cbb3551ca62
tree       01d3fd83a25c54168fbce44ec5fb64dd68435030
parent     mm/slub: Convert detached_freelist to use a struct slab
mm/slub: Convert kfree() to use a struct slab
Convert kfree(), kmem_cache_free() and ___cache_free() to resolve object
addresses to struct slab, using folio as intermediate step where needed.
Keep passing the result as struct page for now in preparation for mass
conversion of internal functions.

[ vbabka@suse.cz: Use folio as intermediate step when checking for
  large kmalloc pages, and when freeing them - rename free_nonslab_page()
  to free_large_kmalloc() that takes struct folio ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
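[ Editor's note: for orientation, the lookup pattern this patch converges
  on, written out as a minimal sketch. It uses only helpers that appear in
  the diff below (virt_to_folio(), folio_test_slab(), folio_slab(),
  slab_page(), free_large_kmalloc()); the function name and standalone
  framing are illustrative, not part of the patch:

        /* Sketch: resolve an object pointer to its slab via the folio. */
        void sketch_free_path(struct kmem_cache *s, void *object)
        {
                struct folio *folio = virt_to_folio(object);

                if (unlikely(!folio_test_slab(folio))) {
                        /* Not a slab: a large kmalloc backed by a compound page. */
                        free_large_kmalloc(folio, object);
                        return;
                }
                /* A slab folio: convert, but keep passing struct page for now. */
                slab_free(s, slab_page(folio_slab(folio)), object, NULL, 1, _RET_IP_);
        }
]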
 mm/slub.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a45b74d2712f..acf2608a57c5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3517,7 +3517,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_KASAN_GENERIC
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 {
-        do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+        do_slab_free(cache, slab_page(virt_to_slab(x)), x, NULL, 1, addr);
 }
 #endif
 
@@ -3527,7 +3527,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
         if (!s)
                 return;
         trace_kmem_cache_free(_RET_IP_, x, s->name);
-        slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
+        slab_free(s, slab_page(virt_to_slab(x)), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3539,16 +3539,17 @@ struct detached_freelist {
         struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page, void *object)
+static inline void free_large_kmalloc(struct folio *folio, void *object)
 {
-        unsigned int order = compound_order(page);
+        unsigned int order = folio_order(folio);
 
-        if (WARN_ON_ONCE(!PageCompound(page)))
+        if (WARN_ON_ONCE(order == 0))
                 pr_warn_once("object pointer: 0x%p\n", object);
 
         kfree_hook(object);
-        mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
-        __free_pages(page, order);
+        mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+                              -(PAGE_SIZE << order));
+        __free_pages(folio_page(folio, 0), order);
 }
 
 /*
@@ -3588,7 +3589,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
         if (!s) {
                 /* Handle kalloc'ed objects */
                 if (unlikely(!folio_test_slab(folio))) {
-                        free_nonslab_page(folio_page(folio, 0), object);
+                        free_large_kmalloc(folio, object);
                         p[size] = NULL; /* mark object processed */
                         return size;
                 }
@@ -4547,7 +4548,8 @@ EXPORT_SYMBOL(__ksize);
 
 void kfree(const void *x)
 {
-        struct page *page;
+        struct folio *folio;
+        struct slab *slab;
         void *object = (void *)x;
 
         trace_kfree(_RET_IP_, x);
@@ -4555,12 +4557,13 @@ void kfree(const void *x)
         if (unlikely(ZERO_OR_NULL_PTR(x)))
                 return;
 
-        page = virt_to_head_page(x);
-        if (unlikely(!PageSlab(page))) {
-                free_nonslab_page(page, object);
+        folio = virt_to_folio(x);
+        if (unlikely(!folio_test_slab(folio))) {
+                free_large_kmalloc(folio, object);
                 return;
         }
-        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
+        slab = folio_slab(folio);
+        slab_free(slab->slab_cache, slab_page(slab), object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
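[ Editor's note: two observations on the converted code, offered as
  reviewer commentary rather than as part of the change. First,
  folio_page(folio, 0) names the head page of the folio, which is what
  mod_lruvec_page_state() and __free_pages() still expect. Second, a
  large kmalloc allocation is always backed by a compound page of
  order > 0, so WARN_ON_ONCE(order == 0) is the folio-native equivalent
  of the old WARN_ON_ONCE(!PageCompound(page)) test. A minimal sketch of
  that equivalence, variable names taken from the diff above:

        /* Old check: the head page of a large kmalloc must be compound. */
        if (WARN_ON_ONCE(!PageCompound(page)))
                pr_warn_once("object pointer: 0x%p\n", object);

        /*
         * New check: an order-0 folio is a single page, which cannot
         * back a large kmalloc allocation; folio_order() > 0 otherwise.
         */
        if (WARN_ON_ONCE(folio_order(folio) == 0))
                pr_warn_once("object pointer: 0x%p\n", object);
]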