author     Johannes Weiner <hannes@cmpxchg.org>            2020-06-04 01:02:24 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-04 05:09:48 +0200
commit     d9eb1ea2bf8734afd8ec7d995270437a7242f82b (patch)
tree       69658ef3d46c9e2c411a5a4771133de89f39dee1
parent     mm: memcontrol: document the new swap control behavior (diff)
mm: memcontrol: delete unused lrucare handling
Swapin faults were the last event to charge pages after they had already
been put on the LRU list. Now that we charge directly on swapin, the
lrucare portion of the charge code is unused.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Shakeel Butt <shakeelb@google.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-19-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
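For readers skimming the call sites below: after this patch a page is charged before it is added to the LRU, so every caller drops the lrucare flag. A minimal sketch of the resulting call pattern, assuming a freshly allocated anonymous page as in do_anonymous_page() (error paths abbreviated; not a verbatim excerpt of the file):

	/* Sketch only: charge a freshly allocated, not-yet-LRU page. */
	struct page *page;

	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	if (!page)
		return VM_FAULT_OOM;

	/* No lrucare argument anymore; the charge happens before LRU insertion. */
	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
		put_page(page);
		return VM_FAULT_OOM;
	}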
-rw-r--r--  include/linux/memcontrol.h   5
-rw-r--r--  kernel/events/uprobes.c      3
-rw-r--r--  mm/filemap.c                 2
-rw-r--r--  mm/huge_memory.c             2
-rw-r--r--  mm/khugepaged.c              4
-rw-r--r--  mm/memcontrol.c             57
-rw-r--r--  mm/memory.c                  8
-rw-r--r--  mm/migrate.c                 2
-rw-r--r--  mm/shmem.c                   2
-rw-r--r--  mm/swap_state.c              2
-rw-r--r--  mm/userfaultfd.c             2
11 files changed, 19 insertions(+), 70 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 96257f995caa..d5bf3b5bfe6d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -355,8 +355,7 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
struct mem_cgroup *memcg);
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
- bool lrucare);
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -839,7 +838,7 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(
}
static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, bool lrucare)
+ gfp_t gfp_mask)
{
return 0;
}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4253c153e985..eddc8db96027 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -167,8 +167,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
addr + PAGE_SIZE);
if (new_page) {
- err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL,
- false);
+ err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
if (err)
return err;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index f08b0ca34e31..455990621989 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -845,7 +845,7 @@ static int __add_to_page_cache_locked(struct page *page,
page->index = offset;
if (!huge) {
- error = mem_cgroup_charge(page, current->mm, gfp_mask, false);
+ error = mem_cgroup_charge(page, current->mm, gfp_mask);
if (error)
goto error;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e9201a88157e..6df182a18d2c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -593,7 +593,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
VM_BUG_ON_PAGE(!PageCompound(page), page);
- if (mem_cgroup_charge(page, vma->vm_mm, gfp, false)) {
+ if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
put_page(page);
count_vm_event(THP_FAULT_FALLBACK);
count_vm_event(THP_FAULT_FALLBACK_CHARGE);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 32c85b81837a..f29038c485e0 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1059,7 +1059,7 @@ static void collapse_huge_page(struct mm_struct *mm,
goto out_nolock;
}
- if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
+ if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
result = SCAN_CGROUP_CHARGE_FAIL;
goto out_nolock;
}
@@ -1632,7 +1632,7 @@ static void collapse_file(struct mm_struct *mm,
goto out;
}
- if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
+ if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
result = SCAN_CGROUP_CHARGE_FAIL;
goto out;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 425a265dc2a0..316a84025090 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2655,51 +2655,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
}
#endif
-static void lock_page_lru(struct page *page, int *isolated)
+static void commit_charge(struct page *page, struct mem_cgroup *memcg)
{
- pg_data_t *pgdat = page_pgdat(page);
-
- spin_lock_irq(&pgdat->lru_lock);
- if (PageLRU(page)) {
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
- ClearPageLRU(page);
- del_page_from_lru_list(page, lruvec, page_lru(page));
- *isolated = 1;
- } else
- *isolated = 0;
-}
-
-static void unlock_page_lru(struct page *page, int isolated)
-{
- pg_data_t *pgdat = page_pgdat(page);
-
- if (isolated) {
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- SetPageLRU(page);
- add_page_to_lru_list(page, lruvec, page_lru(page));
- }
- spin_unlock_irq(&pgdat->lru_lock);
-}
-
-static void commit_charge(struct page *page, struct mem_cgroup *memcg,
- bool lrucare)
-{
- int isolated;
-
VM_BUG_ON_PAGE(page->mem_cgroup, page);
-
- /*
- * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
- * may already be on some other mem_cgroup's LRU. Take care of it.
- */
- if (lrucare)
- lock_page_lru(page, &isolated);
-
/*
* Nobody should be changing or seriously looking at
* page->mem_cgroup at this point:
@@ -2715,9 +2673,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
* have the page locked
*/
page->mem_cgroup = memcg;
-
- if (lrucare)
- unlock_page_lru(page, isolated);
}
#ifdef CONFIG_MEMCG_KMEM
@@ -6503,22 +6458,18 @@ out:
* @page: page to charge
* @mm: mm context of the victim
* @gfp_mask: reclaim mode
- * @lrucare: page might be on the LRU already
*
* Try to charge @page to the memcg that @mm belongs to, reclaiming
* pages according to @gfp_mask if necessary.
*
* Returns 0 on success. Otherwise, an error code is returned.
*/
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
- bool lrucare)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
unsigned int nr_pages = hpage_nr_pages(page);
struct mem_cgroup *memcg = NULL;
int ret = 0;
- VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
-
if (mem_cgroup_disabled())
goto out;
@@ -6552,7 +6503,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
if (ret)
goto out_put;
- commit_charge(page, memcg, lrucare);
+ commit_charge(page, memcg);
local_irq_disable();
mem_cgroup_charge_statistics(memcg, page, nr_pages);
@@ -6753,7 +6704,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
page_counter_charge(&memcg->memsw, nr_pages);
css_get_many(&memcg->css, nr_pages);
- commit_charge(newpage, memcg, false);
+ commit_charge(newpage, memcg);
local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
diff --git a/mm/memory.c b/mm/memory.c
index 9c886e4207a2..d50d8b498af5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2675,7 +2675,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
}
}
- if (mem_cgroup_charge(new_page, mm, GFP_KERNEL, false))
+ if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new;
cgroup_throttle_swaprate(new_page, GFP_KERNEL);
@@ -3134,7 +3134,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* Tell memcg to use swap ownership records */
SetPageSwapCache(page);
err = mem_cgroup_charge(page, vma->vm_mm,
- GFP_KERNEL, false);
+ GFP_KERNEL);
ClearPageSwapCache(page);
if (err)
goto out_page;
@@ -3358,7 +3358,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
if (!page)
goto oom;
- if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false))
+ if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
goto oom_free_page;
cgroup_throttle_swaprate(page, GFP_KERNEL);
@@ -3854,7 +3854,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
if (!vmf->cow_page)
return VM_FAULT_OOM;
- if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL, false)) {
+ if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
put_page(vmf->cow_page);
return VM_FAULT_OOM;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 44cee40221ec..7bfd0962149e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2786,7 +2786,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
if (unlikely(anon_vma_prepare(vma)))
goto abort;
- if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false))
+ if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
goto abort;
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index b79116185046..e83de27ce8f4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -624,7 +624,7 @@ static int shmem_add_to_page_cache(struct page *page,
page->index = index;
if (!PageSwapCache(page)) {
- error = mem_cgroup_charge(page, charge_mm, gfp, false);
+ error = mem_cgroup_charge(page, charge_mm, gfp);
if (error) {
if (PageTransHuge(page)) {
count_vm_event(THP_FILE_FALLBACK);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f841257a3014..ab0462819a5b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -435,7 +435,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
goto fail_unlock;
}
- if (mem_cgroup_charge(page, NULL, gfp_mask, false)) {
+ if (mem_cgroup_charge(page, NULL, gfp_mask)) {
delete_from_swap_cache(page);
goto fail_unlock;
}
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 2745489415cc..7f5194046b01 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -96,7 +96,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
__SetPageUptodate(page);
ret = -ENOMEM;
- if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL, false))
+ if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
goto out_release;
_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
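Condensed from the mm/memcontrol.c hunks above, the surviving commit_charge() reduces to a single ownership assignment. A sketch of the post-patch function (the original comment block is summarized, not quoted):

	static void commit_charge(struct page *page, struct mem_cgroup *memcg)
	{
		VM_BUG_ON_PAGE(page->mem_cgroup, page);
		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point: the page is either
		 * freshly allocated, isolated, or locked by the caller.
		 */
		page->mem_cgroup = memcg;
	}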