author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2008-02-07 09:14:17 +0100
committer  Linus Torvalds <torvalds@woody.linux-foundation.org> 2008-02-07 17:42:20 +0100
commit     217bc3194d57150549e9234e6ddfee30de28cc78
tree       1de36e60115164b9c3d86b176ce45989cafbc2f2 /mm
parent     memory cgroup enhancements: force_empty interface for dropping all account in...
memory cgroup enhancements: remember "a page is charged as page cache"
Add a flag to page_cgroup to remember "this page is charged as cache."
Here, "cache" covers both the page cache and the swap cache.
This is useful for implementing precise accounting in the memory cgroup.
TODO:
distinguish page-cache and swap-cache
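In outline, the patch funnels both charge paths through a common helper and records the charge type in the new page_cgroup flags field. Below is a condensed sketch of that pattern; the flag, the enum, and the flag-setting logic are taken from the diff further down, while page_cgroup_record_type() and page_cgroup_is_cache() are hypothetical helpers added purely for illustration and do not exist in this patch.

	/*
	 * Sketch only, assuming the struct page_cgroup from mm/memcontrol.c
	 * with the new "flags" member added by this patch.
	 */
	#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */

	enum charge_type {
		MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
		MEM_CGROUP_CHARGE_TYPE_MAPPED,
	};

	/* At charge time the caller's charge type is folded into pc->flags. */
	static void page_cgroup_record_type(struct page_cgroup *pc,
					    enum charge_type ctype)
	{
		pc->flags = 0;
		if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
			pc->flags |= PAGE_CGROUP_FLAG_CACHE;
	}

	/* Later accounting code can ask whether the page was charged as cache. */
	static inline int page_cgroup_is_cache(struct page_cgroup *pc)
	{
		return pc->flags & PAGE_CGROUP_FLAG_CACHE;
	}

Under this reading, the TODO above would amount to adding a second bit alongside PAGE_CGROUP_FLAG_CACHE to tell swap cache apart from page cache, rather than changing the charge interface again.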
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	24
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c867612d9c04..975e89935d52 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -83,7 +83,9 @@ struct page_cgroup {
 	struct mem_cgroup *mem_cgroup;
 	atomic_t ref_cnt;		/* Helpful when pages move b/w  */
 					/* mapped and cached states     */
+	int	flags;
 };
+#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
 
 enum {
 	MEM_CGROUP_TYPE_UNSPEC = 0,
@@ -93,6 +95,11 @@ enum {
 	MEM_CGROUP_TYPE_MAX,
 };
 
+enum charge_type {
+	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
+	MEM_CGROUP_CHARGE_TYPE_MAPPED,
+};
+
 static struct mem_cgroup init_mem_cgroup;
 
 static inline
@@ -306,8 +313,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * 0 if the charge was successful
  * < 0 if the cgroup is over its limit
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-				gfp_t gfp_mask)
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+				gfp_t gfp_mask, enum charge_type ctype)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
@@ -409,6 +416,9 @@ noreclaim:
 	atomic_set(&pc->ref_cnt, 1);
 	pc->mem_cgroup = mem;
 	pc->page = page;
+	pc->flags = 0;
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
+		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 	if (page_cgroup_assign_new_page_cgroup(page, pc)) {
 		/*
 		 * an another charge is added to this page already.
@@ -433,6 +443,13 @@ err:
 	return -ENOMEM;
 }
 
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+			gfp_t gfp_mask)
+{
+	return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 /*
  * See if the cached pages should be charged at all?
  */
@@ -445,7 +462,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 
 	mem = rcu_dereference(mm->mem_cgroup);
 	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-		return mem_cgroup_charge(page, mm, gfp_mask);
+		return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_CACHE);
 	else
 		return 0;
 }
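For context on how the two exported entry points divide the work after this patch, the sketch below shows a schematic caller: page-cache insertions charge through mem_cgroup_cache_charge(), which tags the resulting page_cgroup as cache, while anonymous/mapped pages keep calling mem_cgroup_charge() and stay untagged. The caller shown (example_cache_insert) is illustrative only and is not part of this patch.

	/*
	 * Schematic caller, not from this patch: charging on the page-cache
	 * path sets PAGE_CGROUP_FLAG_CACHE via mem_cgroup_charge_common().
	 */
	static int example_cache_insert(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask)
	{
		int err;

		err = mem_cgroup_cache_charge(page, mm, gfp_mask); /* charged as cache */
		if (err)
			return err;

		/* ... actually insert the page into the page cache here ... */
		return 0;
	}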