author     Vlastimil Babka <vbabka@suse.cz>  2023-11-13 12:02:02 +0100
committer  Vlastimil Babka <vbabka@suse.cz>  2023-12-06 11:57:21 +0100
commit     5a9d31d980cbc9cefcee18e186bd4c5d51f3cba2 (patch)
tree       caef0f3aa3511eb98524efc244677ca433bea096
parent     mm/slab: move kfree() from slab_common.c to slub.c (diff)
mm/slab: move kmalloc_slab() to mm/slab.h
In preparation for the next patch, move the kmalloc_slab() function to
the header, as it will have callers from two files, and make it inline.
To avoid unnecessary bloat, remove all size checks/warnings from
kmalloc_slab(), as they just duplicate those in callers, especially
after the recent changes to kmalloc_size_roundup(). We just need to
adjust the handling of zero size in __do_kmalloc_node(). We can also
stop handling a NULL result from kmalloc_slab() there, as that can no
longer happen (unless it is called too early during boot).

The size_index array becomes visible, so rename it to the more
specific kmalloc_size_index.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
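For illustration only (not part of the patch): a minimal userspace sketch
of the size-to-cache-index lookup that the now-inline kmalloc_slab()
performs. It assumes the patch's stated precondition (0 < size <=
KMALLOC_MAX_CACHE_SIZE) already holds; the table mirrors the
kmalloc_size_index[] array in the diff below, the kernel's fls() is
emulated here with __builtin_clz(), and kmalloc_index() is a hypothetical
name for just the index computation.

/* Userspace sketch of kmalloc_slab()'s size-to-index mapping. */
#include <stdio.h>
#include <stddef.h>

/* Mirrors kmalloc_size_index[] from mm/slab_common.c: one entry per
 * 8-byte step, covering sizes from 8 up to 192 bytes. */
static const unsigned char kmalloc_size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,	/*   8 ..  64 */
	1, 1, 1, 1,		/*  72 ..  96 */
	7, 7, 7, 7,		/* 104 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2	/* 136 .. 192 */
};

/* Userspace stand-in for the kernel's fls(): position of the highest
 * set bit, 1-based; 0 for an input of 0. */
static unsigned int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* The caller guarantees 0 < size <= KMALLOC_MAX_CACHE_SIZE, exactly
 * as the new comment in mm/slab.h requires. */
static unsigned int kmalloc_index(size_t size)
{
	if (size <= 192)
		return kmalloc_size_index[(size - 1) / 8];
	return fls(size - 1);	/* log2 of the next power of two */
}

int main(void)
{
	const size_t sizes[] = { 8, 24, 100, 192, 193, 4096 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4zu -> cache index %u\n",
		       sizes[i], kmalloc_index(sizes[i]));
	return 0;
}

Indices 1 and 2 select the non-power-of-two kmalloc-96 and kmalloc-192
caches, which is why a lookup table, rather than fls() alone, is needed
for sizes up to 192 bytes.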
-rw-r--r--  mm/slab.h         28
-rw-r--r--  mm/slab_common.c  43
2 files changed, 34 insertions(+), 37 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 35a55c4a407d..7d7cc7af614e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -389,8 +389,32 @@ extern const struct kmalloc_info_struct {
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);
-/* Find the kmalloc slab corresponding for a certain size */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
+extern u8 kmalloc_size_index[24];
+
+static inline unsigned int size_index_elem(unsigned int bytes)
+{
+ return (bytes - 1) / 8;
+}
+
+/*
+ * Find the kmem_cache structure that serves a given size of
+ * allocation
+ *
+ * This assumes size is larger than zero and not larger than
+ * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
+ */
+static inline struct kmem_cache *
+kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+{
+ unsigned int index;
+
+ if (size <= 192)
+ index = kmalloc_size_index[size_index_elem(size)];
+ else
+ index = fls(size - 1);
+
+ return kmalloc_caches[kmalloc_type(flags, caller)][index];
+}
void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t orig_size,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f4f275613d2a..31ade17a7ad9 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -665,7 +665,7 @@ EXPORT_SYMBOL(random_kmalloc_seed);
* of two cache sizes there. The size of larger slabs can be determined using
* fls.
*/
-static u8 size_index[24] __ro_after_init = {
+u8 kmalloc_size_index[24] __ro_after_init = {
3, /* 8 */
4, /* 16 */
5, /* 24 */
@@ -692,33 +692,6 @@ static u8 size_index[24] __ro_after_init = {
2 /* 192 */
};
-static inline unsigned int size_index_elem(unsigned int bytes)
-{
- return (bytes - 1) / 8;
-}
-
-/*
- * Find the kmem_cache structure that serves a given size of
- * allocation
- */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
-{
- unsigned int index;
-
- if (size <= 192) {
- if (!size)
- return ZERO_SIZE_PTR;
-
- index = size_index[size_index_elem(size)];
- } else {
- if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
- return NULL;
- index = fls(size - 1);
- }
-
- return kmalloc_caches[kmalloc_type(flags, caller)][index];
-}
-
size_t kmalloc_size_roundup(size_t size)
{
if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
@@ -843,9 +816,9 @@ void __init setup_kmalloc_cache_index_table(void)
for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
unsigned int elem = size_index_elem(i);
- if (elem >= ARRAY_SIZE(size_index))
+ if (elem >= ARRAY_SIZE(kmalloc_size_index))
break;
- size_index[elem] = KMALLOC_SHIFT_LOW;
+ kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
}
if (KMALLOC_MIN_SIZE >= 64) {
@@ -854,7 +827,7 @@ void __init setup_kmalloc_cache_index_table(void)
* is 64 byte.
*/
for (i = 64 + 8; i <= 96; i += 8)
- size_index[size_index_elem(i)] = 7;
+ kmalloc_size_index[size_index_elem(i)] = 7;
}
@@ -865,7 +838,7 @@ void __init setup_kmalloc_cache_index_table(void)
* instead.
*/
for (i = 128 + 8; i <= 192; i += 8)
- size_index[size_index_elem(i)] = 8;
+ kmalloc_size_index[size_index_elem(i)] = 8;
}
}
@@ -977,10 +950,10 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
return ret;
}
- s = kmalloc_slab(size, flags, caller);
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
- if (unlikely(ZERO_OR_NULL_PTR(s)))
- return s;
+ s = kmalloc_slab(size, flags, caller);
ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
ret = kasan_kmalloc(s, ret, size, flags);
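
For illustration only (not part of the patch): the final hunk above moves
the zero-size check ahead of the kmalloc_slab() call, so
__do_kmalloc_node() returns ZERO_SIZE_PTR itself rather than relying on
kmalloc_slab() to do it. Below is a minimal userspace sketch of the
ZERO_SIZE_PTR contract; the two macro definitions mirror
include/linux/slab.h, but this is a sketch, not kernel code.

/* Userspace sketch of the kernel's ZERO_SIZE_PTR convention. */
#include <stdio.h>

/* Mirrors include/linux/slab.h: a distinct non-NULL pointer returned
 * for zero-size allocations; dereferencing it still faults. */
#define ZERO_SIZE_PTR		((void *)16)
#define ZERO_OR_NULL_PTR(x)	((unsigned long)(x) <= \
				 (unsigned long)ZERO_SIZE_PTR)

int main(void)
{
	void *p = ZERO_SIZE_PTR;	/* what kmalloc(0) hands back */

	/* Distinguishable from allocation failure (NULL), yet freeing
	 * code can filter both cases with a single comparison. */
	printf("is zero-or-null: %d\n", ZERO_OR_NULL_PTR(p));
	printf("is NULL:         %d\n", p == NULL);
	return 0;
}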