author     Johannes Weiner <hannes@cmpxchg.org>            2016-03-15 22:57:25 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-16 00:55:16 +0100
commit     fdf1cdb91b6ab7a8a91df68c384f36b8a0909cab (patch)
tree       828ad38946acad9990dcc733a10ebdeed0c1fbd5 /mm/filemap.c
parent     mm: simplify lock_page_memcg() (diff)
download   linux-fdf1cdb91b6ab7a8a91df68c384f36b8a0909cab.tar.xz
           linux-fdf1cdb91b6ab7a8a91df68c384f36b8a0909cab.zip
mm: remove unnecessary uses of lock_page_memcg()
Several users nest lock_page_memcg() inside lock_page() to prevent
page->mem_cgroup from changing. But the page lock already prevents pages
from moving between cgroups, so the extra locking is unnecessary overhead.
Remove lock_page_memcg() from contexts that already hold the page lock,
and fix the debug code in the page stat functions to accept the page lock
as sufficient protection.
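
For illustration only (not taken from this patch), here is a minimal sketch of the pattern the commit removes, using a hypothetical caller example_touch_page() that already holds the page lock; the lock_page_memcg()/unlock_page_memcg() pair around the update becomes redundant because the page lock alone keeps page->mem_cgroup stable:

#include <linux/mm.h>
#include <linux/pagemap.h>	/* lock_page(), unlock_page() */
#include <linux/memcontrol.h>	/* lock_page_memcg(), unlock_page_memcg() */

/* Hypothetical caller, shown only to illustrate the redundant nesting. */
static void example_touch_page(struct page *page)
{
	lock_page(page);		/* page lock pins page->mem_cgroup */

	/* Before this commit: redundant memcg locking under the page lock. */
	lock_page_memcg(page);
	/* ... modify page state and update memcg statistics ... */
	unlock_page_memcg(page);

	/* After this commit: the page lock alone is sufficient. */
	/* ... modify page state and update memcg statistics ... */

	unlock_page(page);
}

The same reasoning covers the callers touched below: both delete_from_page_cache() and replace_page_cache_page() run with the page locked, so their lock_page_memcg() calls can simply be dropped.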
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--   mm/filemap.c   7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 8e629c4ef0c8..61b441b191ad 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -176,8 +176,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe. The caller must hold the mapping's tree_lock and
- * lock_page_memcg().
+ * is safe. The caller must hold the mapping's tree_lock.
  */
 void __delete_from_page_cache(struct page *page, void *shadow)
 {
@@ -260,11 +259,9 @@ void delete_from_page_cache(struct page *page)
 
 	freepage = mapping->a_ops->freepage;
 
-	lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	__delete_from_page_cache(page, NULL);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(page);
 
 	if (freepage)
 		freepage(page);
@@ -557,7 +554,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		new->mapping = mapping;
 		new->index = offset;
 
-		lock_page_memcg(old);
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		__delete_from_page_cache(old, NULL);
 		error = radix_tree_insert(&mapping->page_tree, offset, new);
@@ -572,7 +568,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(old);
 		mem_cgroup_migrate(old, new);
 		radix_tree_preload_end();
 		if (freepage)