author     Johannes Weiner <hannes@cmpxchg.org>             2020-06-04 01:01:38 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-06-04 05:09:47 +0200
commit     6caa6a0703e03236f46461342e31ca53d0e3c091 (patch)
tree       79bb588570b7b433981e78dcdd0c8769076a6798 /mm
parent     mm: shmem: remove rare optimization when swapin races with hole punching (diff)
mm: memcontrol: move out cgroup swaprate throttling
The cgroup swaprate throttling is about matching new anon allocations to the rate of available IO when that is being throttled. It's the io controller hooking into the VM, rather than a memory controller thing.

Rename mem_cgroup_throttle_swaprate() to cgroup_throttle_swaprate(), and drop the @memcg argument which is only used to check whether the preceding page charge has succeeded and the fault is proceeding.

We could decouple the call from mem_cgroup_try_charge() here as well, but that would cause unnecessary churn: the following patches convert all callsites to a new charge API and we'll decouple as we go along.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-5-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
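To make the new calling convention concrete, here is a minimal caller-side sketch assuming a simplified anon-fault-style flow; the wrapper name example_charge_new_anon_page() is hypothetical and the commit/cancel half of the two-step charge API is elided:

/*
 * Hypothetical caller sketch (not part of this commit): charge a new
 * anon page, then let the io controller throttle the task if swap IO
 * on the page's node is currently being throttled.
 */
static int example_charge_new_anon_page(struct page *page,
					struct mm_struct *mm,
					gfp_t gfp_mask)
{
	struct mem_cgroup *memcg;
	int ret;

	ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg);
	if (ret)
		return ret;

	/*
	 * The hook no longer takes the memcg: it derives the node from
	 * the page and skips throttling for allocations that may not
	 * do IO (no __GFP_IO in the gfp mask).
	 */
	cgroup_throttle_swaprate(page, gfp_mask);

	/* A real fault path would go on to commit the charge here. */
	return 0;
}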
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c   5
-rw-r--r--  mm/swapfile.c    14
2 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 340c580f8363..bc0f55d0cc08 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6553,12 +6553,11 @@ out:
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
- struct mem_cgroup *memcg;
int ret;
ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp);
- memcg = *memcgp;
- mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
+ if (*memcgp)
+ cgroup_throttle_swaprate(page, gfp_mask);
return ret;
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 40a80617cb03..1829fc4b3ca2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3798,11 +3798,12 @@ static void free_swap_count_continuations(struct swap_info_struct *si)
}
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
- gfp_t gfp_mask)
+void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
struct swap_info_struct *si, *next;
- if (!(gfp_mask & __GFP_IO) || !memcg)
+ int nid = page_to_nid(page);
+
+ if (!(gfp_mask & __GFP_IO))
return;
if (!blk_cgroup_congested())
@@ -3816,11 +3817,10 @@ void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
return;
spin_lock(&swap_avail_lock);
- plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
- avail_lists[node]) {
+ plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
+ avail_lists[nid]) {
if (si->bdev) {
- blkcg_schedule_throttle(bdev_get_queue(si->bdev),
- true);
+ blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
break;
}
}
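For readability, here is the post-image of cgroup_throttle_swaprate() assembled from the two hunks above; the early-exit checks that fall between the hunks are only indicated, and the closing spin_unlock(), which the second hunk does not show, is filled in to pair with the spin_lock():

void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	struct swap_info_struct *si, *next;
	int nid = page_to_nid(page);

	/* Allocations that may not do IO are never throttled. */
	if (!(gfp_mask & __GFP_IO))
		return;

	/* Nothing to do unless the blkcg is actually congested. */
	if (!blk_cgroup_congested())
		return;

	/* ... further early-exit checks not shown in the diff ... */

	/* Throttle against the first swap device backing this node. */
	spin_lock(&swap_avail_lock);
	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
				  avail_lists[nid]) {
		if (si->bdev) {
			blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
			break;
		}
	}
	spin_unlock(&swap_avail_lock);
}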