author	Faiyaz Mohammed <faiyazm@codeaurora.org>	2021-06-29 04:34:55 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-29 19:53:47 +0200
commit	64dd68497be76ab4e237cca06f5324e220d0f050 (patch)
tree	0fdae33947a31f0e26039ef8015aa271ddcc979c /mm/slab_common.c
parent	slub: force on no_hash_pointers when slub_debug is enabled (diff)
download	linux-64dd68497be76ab4e237cca06f5324e220d0f050.tar.xz
	linux-64dd68497be76ab4e237cca06f5324e220d0f050.zip
mm: slub: move sysfs slab alloc/free interfaces to debugfs
The alloc_calls and free_calls implementation in sysfs has two issues: one is the PAGE_SIZE limitation of sysfs, and the other is that it does not adhere to the "one value per file" rule. To overcome these issues, move the alloc_calls and free_calls implementation to debugfs.

The debugfs cache directory will be created if the SLAB_STORE_USER flag is set.

Rename alloc_calls/free_calls to alloc_traces/free_traces, to be in line with what they do.

[faiyazm@codeaurora.org: fix the leak of alloc/free traces debugfs interface]
  Link: https://lkml.kernel.org/r/1624248060-30286-1-git-send-email-faiyazm@codeaurora.org

Link: https://lkml.kernel.org/r/1623438200-19361-1-git-send-email-faiyazm@codeaurora.org
Signed-off-by: Faiyaz Mohammed <faiyazm@codeaurora.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
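For context, a minimal sketch (not the actual mm/slub.c code) of why debugfs is the better fit here: a debugfs file can be backed by seq_file and stream output of any length, whereas a sysfs show() callback must fit its whole output into a single PAGE_SIZE buffer. The helper name example_debugfs_slab_add and the placeholder trace line are illustrative assumptions.

/* Hedged sketch, not the real mm/slub.c implementation: a seq_file-backed
 * debugfs file has no PAGE_SIZE limit on what it can emit. */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int alloc_traces_show(struct seq_file *seq, void *v)
{
	/* The real interface walks the SLAB_STORE_USER tracking data of the
	 * cache; here we only emit a placeholder line. */
	seq_puts(seq, "example trace entry\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(alloc_traces);

static void example_debugfs_slab_add(const char *cache_name,
				     struct dentry *slab_debugfs_root)
{
	struct dentry *dir = debugfs_create_dir(cache_name, slab_debugfs_root);

	debugfs_create_file("alloc_traces", 0400, dir, NULL, &alloc_traces_fops);
}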
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	2
1 files changed, 2 insertions, 0 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b97b6fa8a7c6..6c0db9f9bd8a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -448,6 +448,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	rcu_barrier();
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
+		debugfs_slab_release(s);
 		kfence_shutdown_cache(s);
 #ifdef SLAB_SUPPORTS_SYSFS
 		sysfs_slab_release(s);
@@ -475,6 +476,7 @@ static int shutdown_cache(struct kmem_cache *s)
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
 		kfence_shutdown_cache(s);
+		debugfs_slab_release(s);
 #ifdef SLAB_SUPPORTS_SYSFS
 		sysfs_slab_unlink(s);
 		sysfs_slab_release(s);
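The two hunks above only hook debugfs_slab_release() into the cache-destruction paths; the helper itself lives in mm/slub.c. A hedged sketch of its general shape, assuming a hypothetical wrapper struct that remembers the per-cache debugfs directory created when the cache was registered:

/* Hedged sketch of the release side; the real debugfs_slab_release() in
 * mm/slub.c may differ in detail. example_slab_debug is an assumed
 * illustration, not a kernel structure. */
#include <linux/debugfs.h>

struct example_slab_debug {
	struct dentry *dir;	/* e.g. /sys/kernel/debug/slab/<cache-name> */
};

static void example_debugfs_slab_release(struct example_slab_debug *dbg)
{
	/* Tear down the directory and its alloc_traces/free_traces files. */
	debugfs_remove_recursive(dbg->dir);
	dbg->dir = NULL;
}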