author     Joonsoo Kim <iamjoonsoo.kim@lge.com>      2013-10-24 03:07:42 +0200
committer  Pekka Enberg <penberg@iki.fi>             2013-10-24 19:17:31 +0200
commit     68126702b419fd26ef4946e314bb3a1f57d3a53f (patch)
tree       af7acaf8d13921ab34271cb500d8454940a12e86 /include
parent     slab: remove cachep in struct slab_rcu (diff)
slab: overloading the RCU head over the LRU for RCU free
With build-time size checking, we can overload the RCU head over the LRU of
struct page to free pages of a slab in RCU context. This is a real help for
implementing the overloading of struct slab onto struct page, which eventually
reduces the memory usage and cache footprint of the SLAB allocator.

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
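The "build-time size checking" mentioned above is a compile-time assertion
that struct rcu_head actually fits in the struct page fields it overlays. A
minimal sketch of such a check using BUILD_BUG_ON(); the real check sits on
the mm/slab.c side of this series, and its exact placement and form may
differ:

void __init kmem_cache_init(void)
{
	/*
	 * The RCU head is overloaded over page->lru, so refuse to build
	 * if struct rcu_head ever outgrows the field it overlays.
	 */
	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	/* ... */
}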
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm_types.h  3
-rw-r--r--  include/linux/slab.h      9
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index faf4b7c1ad12..959cb369b197 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -130,6 +130,9 @@ struct page {

 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+		struct rcu_head rcu_head;	/* Used by SLAB
+					 * when destroying via RCU
+					 */
 	};

 	/* Remainder is not double word aligned */
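On the mm/slab.c side of this series (not part of this diff), the overlaid
rcu_head lets the slab destruction path hand the page itself to call_rcu()
rather than carrying a separate slab_rcu descriptor. A rough sketch of that
usage, where kmem_freepages() stands in for SLAB's page-freeing helper and
the exact code in the tree may differ:

/* RCU callback, runs once a grace period has elapsed. */
static void kmem_rcu_free(struct rcu_head *head)
{
	/* Recover the page from the rcu_head embedded in struct page. */
	struct page *page = container_of(head, struct page, rcu_head);
	struct kmem_cache *cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

/* In slab_destroy(), for caches created with SLAB_DESTROY_BY_RCU: */
call_rcu(&page->rcu_head, kmem_rcu_free);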
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..caaad51fee1f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -51,7 +51,14 @@
  *  }
  *  rcu_read_unlock();
  *
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
  */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
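Concretely, the lookup discipline described by the new comment looks roughly
like this for a cache created with SLAB_DESTROY_BY_RCU. This is only a
sketch: struct conn, conn_cache and table_lookup() are hypothetical names,
and the locking pattern is the point:

struct conn {
	spinlock_t lock;
	int key;
	/* ... payload ... */
};

/* Created elsewhere, hypothetically:
 * conn_cache = kmem_cache_create("conn", sizeof(struct conn), 0,
 *				  SLAB_DESTROY_BY_RCU, NULL);
 */
static struct kmem_cache *conn_cache;

static struct conn *conn_find(int key)
{
	struct conn *c;

	rcu_read_lock();
again:
	c = table_lookup(key);	/* address obtained without the usual locking */
	if (c) {
		/*
		 * The grace period guarantees this memory is still backing
		 * objects of conn_cache, so taking the embedded lock is
		 * safe even if the object was concurrently freed...
		 */
		spin_lock(&c->lock);
		/*
		 * ...but it may have been freed and reused for another
		 * conn, so re-validate its identity under the lock.
		 */
		if (c->key != key) {
			spin_unlock(&c->lock);
			goto again;
		}
	}
	rcu_read_unlock();
	return c;	/* NULL, or a locked, validated conn */
}

Note the order the comment prescribes: the spinlock inside the structure is
taken before rcu_read_unlock(), so the memory cannot be recycled between
validation and use.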