Diffstat (limited to 'mm/slub.c')
 -rw-r--r--  mm/slub.c | 98
 1 file changed, 37 insertions(+), 61 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index b1281b8654bd..862dbd9af4f5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3257,7 +3257,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 {
 	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
 				s->size, gfpflags);
 
 	return ret;
@@ -3280,7 +3280,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru);
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
-	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
@@ -3292,7 +3292,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
 				    s->object_size, s->size, gfpflags, node);
 
 	return ret;
@@ -3306,7 +3306,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
 
-	trace_kmalloc_node(_RET_IP_, ret,
+	trace_kmalloc_node(_RET_IP_, ret, s,
 			   size, s->size, gfpflags, node);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
@@ -3464,9 +3464,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	/* memcg_slab_free_hook() is already called for bulk free. */
-	if (!tail)
-		memcg_slab_free_hook(s, &head, 1);
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3526,9 +3523,10 @@ redo:
 }
 
 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
-				      void *head, void *tail, int cnt,
+				      void *head, void *tail, void **p, int cnt,
 				      unsigned long addr)
 {
+	memcg_slab_free_hook(s, slab, p, cnt);
 	/*
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
@@ -3550,7 +3548,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	if (!s)
 		return;
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
-	slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
+	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3591,88 +3589,67 @@ static inline
 int build_detached_freelist(struct kmem_cache *s, size_t size,
 			    void **p, struct detached_freelist *df)
 {
-	size_t first_skipped_index = 0;
 	int lookahead = 3;
 	void *object;
 	struct folio *folio;
-	struct slab *slab;
-
-	/* Always re-init detached_freelist */
-	df->slab = NULL;
-
-	do {
-		object = p[--size];
-		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
-	} while (!object && size);
-
-	if (!object)
-		return 0;
+	size_t same;
 
+	object = p[--size];
 	folio = virt_to_folio(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
 			free_large_kmalloc(folio, object);
-			p[size] = NULL; /* mark object processed */
+			df->slab = NULL;
 			return size;
 		}
 		/* Derive kmem_cache from object */
-		slab = folio_slab(folio);
-		df->s = slab->slab_cache;
+		df->slab = folio_slab(folio);
+		df->s = df->slab->slab_cache;
 	} else {
-		slab = folio_slab(folio);
+		df->slab = folio_slab(folio);
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
-	if (is_kfence_address(object)) {
-		slab_free_hook(df->s, object, false);
-		__kfence_free(object);
-		p[size] = NULL; /* mark object processed */
-		return size;
-	}
-
 	/* Start new detached freelist */
-	df->slab = slab;
-	set_freepointer(df->s, object, NULL);
 	df->tail = object;
 	df->freelist = object;
-	p[size] = NULL; /* mark object processed */
 	df->cnt = 1;
 
+	if (is_kfence_address(object))
+		return size;
+
+	set_freepointer(df->s, object, NULL);
+
+	same = size;
 	while (size) {
 		object = p[--size];
-		if (!object)
-			continue; /* Skip processed objects */
-
 		/* df->slab is always set at this point */
 		if (df->slab == virt_to_slab(object)) {
 			/* Opportunity build freelist */
 			set_freepointer(df->s, object, df->freelist);
 			df->freelist = object;
 			df->cnt++;
-			p[size] = NULL; /* mark object processed */
-
+			same--;
+			if (size != same)
+				swap(p[size], p[same]);
 			continue;
 		}
 
 		/* Limit look ahead search */
 		if (!--lookahead)
 			break;
-
-		if (!first_skipped_index)
-			first_skipped_index = size + 1;
 	}
 
-	return first_skipped_index;
+	return same;
 }
 
 /* Note that interrupts must be enabled when calling this function. */
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 {
-	if (WARN_ON(!size))
+	if (!size)
 		return;
 
-	memcg_slab_free_hook(s, p, size);
 	do {
 		struct detached_freelist df;
 
@@ -3680,7 +3657,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 		if (!df.slab)
 			continue;
 
-		slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
+			  _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
@@ -3760,7 +3738,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 error:
 	slub_put_cpu_ptr(s->cpu_slab);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false);
-	__kmem_cache_free_bulk(s, i, p);
+	kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
@@ -4441,7 +4419,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
 
-	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 
@@ -4475,7 +4453,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(_RET_IP_, ret, NULL,
 				   size, PAGE_SIZE << get_order(size),
 				   flags, node);
 
@@ -4489,7 +4467,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
 
-	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
+	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 
@@ -4581,7 +4559,7 @@ void kfree(const void *x)
 		return;
 	}
 	slab = folio_slab(folio);
-	slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
+	slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -4890,6 +4868,9 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
+		if (sysfs_slab_alias(s, name))
+			return NULL;
+
 		s->refcount++;
 
 		/*
@@ -4898,11 +4879,6 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		 */
 		s->object_size = max(s->object_size, size);
 		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
-
-		if (sysfs_slab_alias(s, name)) {
-			s->refcount--;
-			s = NULL;
-		}
 	}
 
 	return s;
@@ -4948,7 +4924,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	ret = slab_alloc(s, NULL, gfpflags, caller, size);
 
 	/* Honor the call site pointer we received. */
-	trace_kmalloc(caller, ret, size, s->size, gfpflags);
+	trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
 
 	return ret;
 }
@@ -4964,7 +4940,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
 				   size, PAGE_SIZE << get_order(size),
 				   gfpflags, node);
 
@@ -4979,7 +4955,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
 
 	/* Honor the call site pointer we received. */
-	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+	trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
 
 	return ret;
 }
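
A note on the build_detached_freelist() rewrite above: the old code marked already-processed entries by writing NULL into p[] and skipping them on later passes, whereas the new code swaps every object belonging to the current slab toward the tail of the array. The matched objects end up contiguous at p[size..], so kmem_cache_free_bulk() can hand exactly that span (&p[size], df.cnt objects) to slab_free(), which now runs memcg_slab_free_hook() on it. The userspace sketch below illustrates only that partitioning step under simplified assumptions: plain ints stand in for object pointers and slab_of() is a hypothetical stand-in for virt_to_slab(); it is not kernel code.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for virt_to_slab(): objects 0-99 live in
 * "slab" 0, objects 100-199 in "slab" 1, and so on. */
static int slab_of(int object)
{
	return object / 100;
}

static void swap_int(int *a, int *b)
{
	int tmp = *a;
	*a = *b;
	*b = tmp;
}

/*
 * Sketch of the partitioning done by the rewritten
 * build_detached_freelist(): pop the last object, then walk the array
 * backwards, swapping every object from the same slab into the tail
 * region [same, size).  Returns the new size, i.e. how many
 * unprocessed objects remain at the front; *cnt is the number of
 * matched objects now contiguous at the tail.
 */
static size_t partition_same_slab(size_t size, int *p, size_t *cnt)
{
	int lookahead = 3;
	int object = p[--size];
	int slab = slab_of(object);
	size_t same = size;

	*cnt = 1;			/* the popped object itself */
	while (size) {
		object = p[--size];
		if (slab_of(object) == slab) {
			(*cnt)++;
			same--;
			if (size != same)
				swap_int(&p[size], &p[same]);
			continue;
		}
		/* Limit look ahead search, as in the kernel loop */
		if (!--lookahead)
			break;
	}
	return same;			/* p[same..] now share one slab */
}

int main(void)
{
	int p[] = { 101, 5, 102, 7, 103 };
	size_t cnt, size = sizeof(p) / sizeof(p[0]);

	size = partition_same_slab(size, p, &cnt);
	printf("%zu objects from one slab start at index %zu\n", cnt, size);
	/* Prints: 3 objects from one slab start at index 2 */
	return 0;
}

As in the kernel version, the look-ahead counter bounds how many foreign objects are examined before giving up, so a badly interleaved array simply degrades into several build/free rounds, which is what the do/while loop in kmem_cache_free_bulk() handles.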