| author | Johannes Weiner <hannes@cmpxchg.org> | 2016-03-15 22:57:22 +0100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-16 00:55:16 +0100 |
| commit | 62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd | |
| tree | 43a902faf461c65393a4efebf9ff9622017b92b1 /fs/buffer.c | |
| parent | mm: migrate: do not touch page->mem_cgroup of live pages | |
mm: simplify lock_page_memcg()
Now that migration no longer clears page->mem_cgroup of live pages, it is
safe to make lock_page_memcg() and the memcg stat functions take the page
itself, sparing callers from having to pass mem_cgroup objects around.
[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
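The shape of the change, distilled from the fs/buffer.c hunks below: lock_page_memcg() used to return a struct mem_cgroup * that the caller had to thread through the dirty-accounting path and hand back to unlock_page_memcg(); now the page itself is the only handle. A minimal before/after sketch of the two calling conventions (these are kernel-internal APIs, so this is illustrative rather than a standalone runnable program):

```c
/* Before this patch: the memcg returned by lock_page_memcg() had to
 * be carried through to the accounting helper and to the unlock. */
struct mem_cgroup *memcg;

memcg = lock_page_memcg(page);
if (!TestSetPageDirty(page))
	__set_page_dirty(page, mapping, memcg, 0);	/* memcg threaded through */
unlock_page_memcg(memcg);

/* After this patch: the page is the sole handle; no memcg object
 * ever escapes to the caller. */
lock_page_memcg(page);
if (!TestSetPageDirty(page))
	__set_page_dirty(page, mapping, 0);
unlock_page_memcg(page);
```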
Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- | fs/buffer.c | 18 |
1 file changed, 8 insertions, 10 deletions
```diff
diff --git a/fs/buffer.c b/fs/buffer.c
index dc991510bb06..33be29675358 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -624,14 +624,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * The caller must hold lock_page_memcg().
  */
 static void __set_page_dirty(struct page *page, struct address_space *mapping,
-			     struct mem_cgroup *memcg, int warn)
+			     int warn)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
-		account_page_dirtied(page, mapping, memcg);
+		account_page_dirtied(page, mapping);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
@@ -666,7 +666,6 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
 int __set_page_dirty_buffers(struct page *page)
 {
 	int newly_dirty;
-	struct mem_cgroup *memcg;
 	struct address_space *mapping = page_mapping(page);
 
 	if (unlikely(!mapping))
@@ -686,14 +685,14 @@ int __set_page_dirty_buffers(struct page *page)
 	 * Lock out page->mem_cgroup migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
-		__set_page_dirty(page, mapping, memcg, 1);
+		__set_page_dirty(page, mapping, 1);
 
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1167,15 +1166,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
 	if (!test_set_buffer_dirty(bh)) {
 		struct page *page = bh->b_page;
 		struct address_space *mapping = NULL;
-		struct mem_cgroup *memcg;
 
-		memcg = lock_page_memcg(page);
+		lock_page_memcg(page);
 		if (!TestSetPageDirty(page)) {
 			mapping = page_mapping(page);
 			if (mapping)
-				__set_page_dirty(page, mapping, memcg, 0);
+				__set_page_dirty(page, mapping, 0);
 		}
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 		if (mapping)
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
```
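Note the symmetry this buys: lock and unlock now take the same argument, so a caller can no longer unpair them with a stale memcg pointer. For reference, the post-patch prototypes as implied by the updated callers above (the real declarations live in include/linux/memcontrol.h; this is an inference from the diff, not a quote of that header):

```c
/* Locking API after this patch, inferred from the callers in this diff. */
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
```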