path: root/mm/slab_common.c
author    Vlastimil Babka <vbabka@suse.cz>  2024-09-13 11:13:03 +0200
committer Vlastimil Babka <vbabka@suse.cz>  2024-09-13 11:13:03 +0200
commit    ecc4d6af979b3bd4d239ff80bbba455c90d3f4f3 (patch)
tree      f42df8888b5032a54a5bf80721435ae9a31137d5 /mm/slab_common.c
parent    Merge branch 'slab/for-6.12/rcu_barriers' into slab/for-next (diff)
parent    mm, slab: restore kerneldoc for kmem_cache_create() (diff)
Merge branch 'slab/for-6.12/kmem_cache_args' into slab/for-next
Merge kmem_cache_create() refactoring by Christian Brauner. Note this includes a merge of the vfs.file tree that contains the prerequisite kmem_cache_create_rcu() work.
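For readers new to the interface this merge introduces: callers now describe a cache with a struct kmem_cache_args (align, ctor, the usercopy window, and the new freelist-pointer fields, all visible in the diff below) instead of a growing list of positional parameters. A minimal sketch of a post-merge caller; struct kmem_cache_args and its fields match the patch, while the example object, cache name, and init function are hypothetical:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>

/* Hypothetical object with a usercopy window in buf[]. */
struct example_obj {
	unsigned long state;
	char buf[64];		/* may be copied to/from userspace */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	struct kmem_cache_args args = {
		.align      = __alignof__(struct example_obj),
		.useroffset = offsetof(struct example_obj, buf),
		.usersize   = sizeof_field(struct example_obj, buf),
	};

	/* Replaces an old positional kmem_cache_create_usercopy() call. */
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj),
					   &args, SLAB_HWCACHE_ALIGN);
	return example_cachep ? 0 : -ENOMEM;
}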
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c  139
1 file changed, 43 insertions(+), 96 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 11ef221bce17..61f32420230a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -215,32 +215,29 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
}
static struct kmem_cache *create_cache(const char *name,
- unsigned int object_size, unsigned int align,
- slab_flags_t flags, unsigned int useroffset,
- unsigned int usersize, void (*ctor)(void *),
- struct kmem_cache *root_cache)
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
{
struct kmem_cache *s;
int err;
- if (WARN_ON(useroffset + usersize > object_size))
- useroffset = usersize = 0;
+ if (WARN_ON(args->useroffset + args->usersize > object_size))
+ args->useroffset = args->usersize = 0;
+
+ /* If a custom freelist pointer is requested make sure it's sane. */
+ err = -EINVAL;
+ if (args->use_freeptr_offset &&
+ (args->freeptr_offset >= object_size ||
+ !(flags & SLAB_TYPESAFE_BY_RCU) ||
+ !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
+ goto out;
err = -ENOMEM;
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
if (!s)
goto out;
-
- s->name = name;
- s->size = s->object_size = object_size;
- s->align = align;
- s->ctor = ctor;
-#ifdef CONFIG_HARDENED_USERCOPY
- s->useroffset = useroffset;
- s->usersize = usersize;
-#endif
-
- err = __kmem_cache_create(s, flags);
+ err = do_kmem_cache_create(s, name, object_size, args, flags);
if (err)
goto out_free_cache;
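
The sanity check added above accepts a custom freelist pointer only when the offset lies inside the object, the cache is SLAB_TYPESAFE_BY_RCU, and the offset is freeptr_t-aligned. A hedged sketch of a cache satisfying all three constraints, loosely modeled on the vfs.file use this series was written for; the struct and all names are illustrative only:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>

/*
 * RCU-typesafe objects may still be inspected by readers after being
 * freed, so the freelist pointer gets a dedicated slot rather than
 * overlapping a field the readers validate.
 */
struct rcu_obj {
	unsigned long self;	/* readers revalidate this under RCU */
	freeptr_t freeptr;	/* reused by the allocator when free */
	unsigned long payload;
};

static struct kmem_cache *rcu_obj_cachep;

static int __init rcu_obj_cache_init(void)
{
	struct kmem_cache_args args = {
		.freeptr_offset     = offsetof(struct rcu_obj, freeptr),
		.use_freeptr_offset = true,
	};

	/* Without SLAB_TYPESAFE_BY_RCU, create_cache() rejects the
	 * offset with -EINVAL and the caller sees NULL. */
	rcu_obj_cachep = kmem_cache_create("rcu_obj",
					   sizeof(struct rcu_obj),
					   &args, SLAB_TYPESAFE_BY_RCU);
	return rcu_obj_cachep ? 0 : -ENOMEM;
}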
@@ -255,39 +252,24 @@ out:
}
/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- * for copying to userspace
+ * __kmem_cache_create_args - Create a kmem cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
+ * @object_size: The size of objects to be created in this cache.
+ * @args: Additional arguments for the cache creation (see
+ * &struct kmem_cache_args).
+ * @flags: See %SLAB_* flags for an explanation of individual @flags.
*
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
+ * Not to be called directly, use the kmem_cache_create() wrapper with the same
+ * parameters.
*
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
+ * Context: Cannot be called within a interrupt, but can be interrupted.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *))
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
{
struct kmem_cache *s = NULL;
const char *cache_name;
@@ -309,7 +291,7 @@ kmem_cache_create_usercopy(const char *name,
mutex_lock(&slab_mutex);
- err = kmem_cache_sanity_check(name, size);
+ err = kmem_cache_sanity_check(name, object_size);
if (err) {
goto out_unlock;
}
@@ -330,12 +312,14 @@ kmem_cache_create_usercopy(const char *name,
/* Fail closed on bad usersize of useroffset values. */
if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
- WARN_ON(!usersize && useroffset) ||
- WARN_ON(size < usersize || size - usersize < useroffset))
- usersize = useroffset = 0;
-
- if (!usersize)
- s = __kmem_cache_alias(name, size, align, flags, ctor);
+ WARN_ON(!args->usersize && args->useroffset) ||
+ WARN_ON(object_size < args->usersize ||
+ object_size - args->usersize < args->useroffset))
+ args->usersize = args->useroffset = 0;
+
+ if (!args->usersize)
+ s = __kmem_cache_alias(name, object_size, args->align, flags,
+ args->ctor);
if (s)
goto out_unlock;
@@ -345,9 +329,8 @@ kmem_cache_create_usercopy(const char *name,
goto out_unlock;
}
- s = create_cache(cache_name, size,
- calculate_alignment(flags, align, size),
- flags, useroffset, usersize, ctor, NULL);
+ args->align = calculate_alignment(flags, args->align, object_size);
+ s = create_cache(cache_name, object_size, args, flags);
if (IS_ERR(s)) {
err = PTR_ERR(s);
kfree_const(cache_name);
@@ -369,41 +352,7 @@ out_unlock:
}
return s;
}
-EXPORT_SYMBOL(kmem_cache_create_usercopy);
-
-/**
- * kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create(const char *name, unsigned int size, unsigned int align,
- slab_flags_t flags, void (*ctor)(void *))
-{
- return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
- ctor);
-}
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(__kmem_cache_create_args);
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
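
Although both old entry points lose their out-of-line definitions and exports here, existing callers keep building: in this series kmem_cache_create() becomes a header macro that packs the classic positional arguments into an on-stack struct kmem_cache_args and dispatches to __kmem_cache_create_args(). A simplified sketch of the equivalence, assuming that macro behavior (the real header uses _Generic dispatch and covers more cases):

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo {
	unsigned long a;
};

static void foo_ctor(void *obj)
{
	memset(obj, 0, sizeof(struct foo));
}

static int __init foo_cache_init(void)
{
	/* Classic positional form, unchanged at existing call sites: */
	struct kmem_cache *c1 = kmem_cache_create("foo", sizeof(struct foo),
						  0, SLAB_HWCACHE_ALIGN,
						  foo_ctor);

	/* ...which now ends up roughly equivalent to: */
	struct kmem_cache_args args = {
		.align = 0,
		.ctor  = foo_ctor,
	};
	struct kmem_cache *c2 = __kmem_cache_create_args("foo_args",
							 sizeof(struct foo),
							 &args,
							 SLAB_HWCACHE_ALIGN);

	return (c1 && c2) ? 0 : -ENOMEM;
}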
@@ -689,9 +638,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
{
int err;
unsigned int align = ARCH_KMALLOC_MINALIGN;
-
- s->name = name;
- s->size = s->object_size = size;
+ struct kmem_cache_args kmem_args = {};
/*
* kmalloc caches guarantee alignment of at least the largest
@@ -700,14 +647,14 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
*/
if (flags & SLAB_KMALLOC)
align = max(align, 1U << (ffs(size) - 1));
- s->align = calculate_alignment(flags, align, size);
+ kmem_args.align = calculate_alignment(flags, align, size);
#ifdef CONFIG_HARDENED_USERCOPY
- s->useroffset = useroffset;
- s->usersize = usersize;
+ kmem_args.useroffset = useroffset;
+ kmem_args.usersize = usersize;
#endif
- err = __kmem_cache_create(s, flags);
+ err = do_kmem_cache_create(s, name, size, &kmem_args, flags);
if (err)
panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",