-rw-r--r--	mm/kfence/core.c	9
-rw-r--r--	mm/slab.h	48
2 files changed, 43 insertions, 14 deletions
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4eb60cf5ff8b..267dfde43b91 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
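
The switch from IS_ENABLED() to #if is forced by the slab.h change below: once the struct slab fields are split per allocator, slab->objects exists only under CONFIG_SLUB and slab->s_mem only under CONFIG_SLAB, and IS_ENABLED() still parses and type-checks both branches. A minimal standalone sketch of the difference (slab_sketch and set_fields are hypothetical names; one config is defined just so the sketch compiles on its own):

/*
 * Sketch only: a cut-down model of why this patch switches from
 * IS_ENABLED() to #if.
 */
#define CONFIG_SLUB 1	/* pick one layout for demonstration */

struct slab_sketch {
#if defined(CONFIG_SLUB)
	unsigned int objects;	/* exists only in the SLUB layout */
#elif defined(CONFIG_SLAB)
	void *s_mem;		/* exists only in the SLAB layout */
#endif
};

static void set_fields(struct slab_sketch *slab, void *addr)
{
	/*
	 * With IS_ENABLED(CONFIG_SLAB), the controlled statement is
	 * still parsed and type-checked, so "slab->s_mem" would now be
	 * a compile error: the SLUB layout has no s_mem member. The
	 * preprocessor form removes the dead reference before the
	 * compiler ever sees it.
	 */
#if defined(CONFIG_SLUB)
	slab->objects = 1;
	(void)addr;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif
}

int main(void)
{
	struct slab_sketch s;

	set_fields(&s, 0);
	return 0;
}
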
diff --git a/mm/slab.h b/mm/slab.h
index b8d7ed42cca8..ad15f624a6b5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -8,9 +8,24 @@
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
 	union {
 		struct list_head slab_list;
-		struct { /* Partial pages */
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void *s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */
@@ -18,25 +33,32 @@ struct slab {
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void *__unused_1;
+	void *freelist;		/* first free block */
+	void *__unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;	/* SLAB */
-		int units;		/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
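
An aside on the SLUB branch above: the anonymous bitfield struct shares its storage with counters, so inuse/objects/frozen can be read and written by name while SLUB's cmpxchg-based fast paths snapshot and swap all three as a single word. A small standalone sketch of that two-view pattern (slab_counters and the demo values are illustrative, not kernel code):

#include <stdio.h>

/* Illustrative only: the two-view pattern used by the SLUB branch. */
union slab_counters {
	unsigned long counters;		/* whole-word view */
	struct {
		unsigned inuse:16;	/* objects handed out */
		unsigned objects:15;	/* objects in the slab */
		unsigned frozen:1;	/* frozen on a CPU? */
	};
};

int main(void)
{
	union slab_counters c = { .counters = 0 };

	c.inuse = 3;
	c.objects = 32;
	c.frozen = 1;
	/* One load captures all three fields for a later compare-and-swap. */
	printf("counters = %#lx\n", c.counters);
	return 0;
}
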
@@ -48,10 +70,14 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 SLAB_MATCH(slab_list, slab_list);
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
 SLAB_MATCH(slab_cache, slab_cache);
+#endif
+#ifdef CONFIG_SLAB
 SLAB_MATCH(s_mem, s_mem);
 SLAB_MATCH(active, active);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
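
The new guards mirror the layouts above: rcu_head and slab_cache are gone from the SLOB variant, and s_mem/active now exist only for SLAB, so those assertions must be compiled out where the fields are absent. For reference, SLAB_MATCH is defined just before these assertions in mm/slab.h (not shown in this diff); paraphrased, it asserts that a struct slab field overlays its struct page counterpart at the same offset:

/* Paraphrased from earlier in mm/slab.h (not part of this diff): */
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
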
@@ -599,6 +625,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -645,6 +672,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {
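
The #ifndef CONFIG_SLOB / #endif pair compiles out virt_to_cache() and cache_from_obj() for SLOB, since both resolve an object pointer back to its kmem_cache through slab->slab_cache, which the SLOB layout above no longer declares. The function bodies are cut off by the hunk boundaries; a sketch of the lookup they perform, reconstructed from the visible context (the exact WARN_ONCE wording is an assumption):

/*
 * Reconstruction, not a verbatim quote: the lookup the new guard
 * compiles out under SLOB.
 */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
		return NULL;
	/* Possible only because non-SLOB layouts carry slab_cache. */
	return slab->slab_cache;
}
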