author		Vlastimil Babka <vbabka@suse.cz>	2022-11-10 10:42:34 +0100
committer	Vlastimil Babka <vbabka@suse.cz>	2022-11-21 10:36:09 +0100
commit		76537db3b95cbf5d0189ce185c16db9f93017021 (patch)
tree		a09d81a797d52a9a64da0470ec25997b81d8499e /mm/slab.h
parent		Merge branch 'slab/for-6.2/tools' into slab/for-next (diff)
parent		mm/sl[au]b: rearrange struct slab fields to allow larger rcu_head (diff)
Merge branch 'slab/for-6.2/fit_rcu_head' into slab/for-next
A series by myself that reorders the fields in struct slab so the embedded rcu_head has room to grow (for debugging purposes). It requires changes to isolate_movable_page() to skip slab pages, which can otherwise become false-positive __PageMovable: movability is signalled through the low bits of page->mapping, and the slab metadata that overlaps that word may happen to set them.
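
For context on the isolate_movable_page() interaction: the kernel decides __PageMovable() purely from the tag in the low two bits of page->mapping (PAGE_MAPPING_MOVABLE is 0x2, masked with PAGE_MAPPING_FLAGS, 0x3). The stand-alone user-space sketch below models that test; struct fake_page, fake_page_movable() and the sample addresses are illustrative assumptions, not kernel code.

#include <stdio.h>

#define PAGE_MAPPING_MOVABLE	0x2UL	/* real kernel tag value */
#define PAGE_MAPPING_FLAGS	0x3UL	/* mask for the two tag bits */

/* Hypothetical stand-in for the first four words of struct page. */
struct fake_page {
	unsigned long flags;
	unsigned long word1;	/* lru.next / compound_head in the kernel */
	unsigned long word2;	/* lru.prev */
	unsigned long mapping;	/* address_space pointer plus tag bits */
};

/* Models the kernel's __PageMovable() test: only the low bits decide. */
static int fake_page_movable(const struct fake_page *p)
{
	return (p->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE;
}

int main(void)
{
	/* A genuinely movable page: the driver tags ->mapping with 0x2. */
	struct fake_page movable = { .mapping = 0x7f0000001000UL | PAGE_MAPPING_MOVABLE };

	/* A slab page reuses the same words for allocator metadata; if the
	 * word that overlaps ->mapping happens to end in binary ..10, the
	 * test misfires. That is why isolate_movable_page() has to rule
	 * out slab pages before trusting __PageMovable(). */
	struct fake_page slab = { .mapping = 0xffff888012345602UL };

	printf("tagged page movable? %d\n", fake_page_movable(&movable));	/* 1 */
	printf("slab page  movable? %d (false positive)\n", fake_page_movable(&slab));
	return 0;
}
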
Diffstat (limited to 'mm/slab.h')
-rw-r--r--	mm/slab.h | 54
1 file changed, 32 insertions(+), 22 deletions(-)
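
The space gained by the rearrangement is easiest to see in a simplified 64-bit model of the new SLUB layout. Previously rcu_head shared a union with only the two-word slab_list, so it could not grow without spilling into fields outside the union; now it overlaps the whole four-word block of slab_list, freelist and counters. The sketch below is an assumption-laden model, not kernel code: big_rcu_head and its four-word size are hypothetical, standing in for a debug-enlarged rcu_head.

#include <assert.h>
#include <stddef.h>

struct list_head { void *next, *prev; };

/* Hypothetical rcu_head grown to four words for debugging. */
struct big_rcu_head { void *next, *func, *dbg1, *dbg2; };

/* Simplified model of the new SLUB struct slab layout (64-bit). */
struct slab_model {
	unsigned long __page_flags;
	void *slab_cache;
	union {
		struct {
			struct list_head slab_list;	/* 2 words */
			/* Double-word boundary */
			void *freelist;			/* 1 word  */
			unsigned long counters;		/* 1 word  */
		};
		struct big_rcu_head rcu_head;		/* 4 words now fit */
	};
	unsigned int __unused;
};

/* The same invariants the real hunks assert below: freelist stays
 * double-word aligned for cmpxchg_double, and the slab view does not
 * outgrow struct page (64 bytes on 64-bit). */
static_assert(offsetof(struct slab_model, freelist) % (2 * sizeof(void *)) == 0,
	      "freelist off the double-word boundary");
static_assert(sizeof(struct slab_model) <= 64, "slab view larger than struct page");

int main(void) { return 0; }
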
diff --git a/mm/slab.h b/mm/slab.h
index 19e1baac807c..8c4aafb00bd6 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -11,37 +11,43 @@ struct slab {
#if defined(CONFIG_SLAB)
+ struct kmem_cache *slab_cache;
union {
- struct list_head slab_list;
+ struct {
+ struct list_head slab_list;
+ void *freelist; /* array of free object indexes */
+ void *s_mem; /* first object */
+ };
struct rcu_head rcu_head;
};
- struct kmem_cache *slab_cache;
- void *freelist; /* array of free object indexes */
- void *s_mem; /* first object */
unsigned int active;
#elif defined(CONFIG_SLUB)
- union {
- struct list_head slab_list;
- struct rcu_head rcu_head;
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct {
- struct slab *next;
- int slabs; /* Nr of slabs left */
- };
-#endif
- };
struct kmem_cache *slab_cache;
- /* Double-word boundary */
- void *freelist; /* first free object */
union {
- unsigned long counters;
struct {
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
+ union {
+ struct list_head slab_list;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ struct {
+ struct slab *next;
+ int slabs; /* Nr of slabs left */
+ };
+#endif
+ };
+ /* Double-word boundary */
+ void *freelist; /* first free object */
+ union {
+ unsigned long counters;
+ struct {
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ };
};
+ struct rcu_head rcu_head;
};
unsigned int __unused;
@@ -66,9 +72,10 @@ struct slab {
#define SLAB_MATCH(pg, sl) \
static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
-SLAB_MATCH(compound_head, slab_list); /* Ensure bit 0 is clear */
#ifndef CONFIG_SLOB
-SLAB_MATCH(rcu_head, rcu_head);
+SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
+#else
+SLAB_MATCH(compound_head, slab_list); /* Ensure bit 0 is clear */
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
@@ -76,6 +83,9 @@ SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
+static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
+#endif
/**
* folio_slab - Converts from folio to slab.
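
The SLAB_MATCH hunk above carries the central invariant: struct slab is overlaid byte-for-byte on struct page, and the offsetof() assertions keep the two layouts in lockstep. In particular, the word aliasing compound_head must hold a value with bit 0 clear, since a set bit 0 marks a tail page; an aligned kmem_cache pointer satisfies that for free, which is why the assertion moves from slab_list to slab_cache for !CONFIG_SLOB. Below is a self-contained sketch of the technique; page_model and slab_overlay are illustrative stand-ins for the kernel structs.

#include <assert.h>
#include <stddef.h>

/* Illustrative subset of struct page (64-bit). */
struct page_model {
	unsigned long flags;
	unsigned long compound_head;	/* bit 0 set marks a tail page */
	void *mapping;
	int _refcount;
};

/* Illustrative subset of struct slab overlaying the same memory. */
struct slab_overlay {
	unsigned long __page_flags;
	void *slab_cache;	/* aliases compound_head: pointer, bit 0 clear */
	void *freelist;
	int __page_refcount;
};

/* Same technique as the kernel's SLAB_MATCH: a compile-time check that
 * the aliased fields sit at identical offsets in both views. */
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page_model, pg) ==		\
		      offsetof(struct slab_overlay, sl), #pg " != " #sl)

SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);
SLAB_MATCH(_refcount, __page_refcount);
#undef SLAB_MATCH

static_assert(sizeof(struct slab_overlay) <= sizeof(struct page_model),
	      "overlay must not outgrow the page it aliases");

int main(void) { return 0; }
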