author    Davidlohr Bueso <dave@stgolabs.net>    2017-09-09 01:15:21 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-09-09 03:26:49 +0200
commit    fa90b2fd300f38cc7b3e416974116c83f3953465 (patch)
tree      d21f304639c4de0e67a88086fd0f988e41567250 /mm/memcontrol.c
parent    fs/epoll: use faster rb_first_cached() (diff)
mem/memcg: cache rightmost node
Such that we can optimize __mem_cgroup_largest_soft_limit_node(). The
only overhead is the extra footprint for the cached pointer, but this
should not be an issue for mem_cgroup_tree_per_node.

[dave@stgolabs.net: brain fart #2]
Link: http://lkml.kernel.org/r/20170731160114.GE21328@linux-80c1.suse
Link: http://lkml.kernel.org/r/20170719014603.19029-17-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ca83f3854e4f..15af3da5af02 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -119,6 +119,7 @@ static const char *const mem_cgroup_lru_names[] = {
struct mem_cgroup_tree_per_node {
struct rb_root rb_root;
+ struct rb_node *rb_rightmost;
spinlock_t lock;
};
@@ -386,6 +387,7 @@ static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
struct rb_node **p = &mctz->rb_root.rb_node;
struct rb_node *parent = NULL;
struct mem_cgroup_per_node *mz_node;
+ bool rightmost = true;
if (mz->on_tree)
return;
@@ -397,8 +399,11 @@ static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
parent = *p;
mz_node = rb_entry(parent, struct mem_cgroup_per_node,
tree_node);
- if (mz->usage_in_excess < mz_node->usage_in_excess)
+ if (mz->usage_in_excess < mz_node->usage_in_excess) {
p = &(*p)->rb_left;
+ rightmost = false;
+ }
+
/*
* We can't avoid mem cgroups that are over their soft
* limit by the same amount
@@ -406,6 +411,10 @@ static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
else if (mz->usage_in_excess >= mz_node->usage_in_excess)
p = &(*p)->rb_right;
}
+
+ if (rightmost)
+ mctz->rb_rightmost = &mz->tree_node;
+
rb_link_node(&mz->tree_node, parent, p);
rb_insert_color(&mz->tree_node, &mctz->rb_root);
mz->on_tree = true;
@@ -416,6 +425,10 @@ static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
{
if (!mz->on_tree)
return;
+
+ if (&mz->tree_node == mctz->rb_rightmost)
+ mctz->rb_rightmost = rb_prev(&mz->tree_node);
+
rb_erase(&mz->tree_node, &mctz->rb_root);
mz->on_tree = false;
}
@@ -496,16 +509,15 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
- struct rb_node *rightmost = NULL;
struct mem_cgroup_per_node *mz;
retry:
mz = NULL;
- rightmost = rb_last(&mctz->rb_root);
- if (!rightmost)
+ if (!mctz->rb_rightmost)
goto done; /* Nothing to reclaim from */
- mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
+ mz = rb_entry(mctz->rb_rightmost,
+ struct mem_cgroup_per_node, tree_node);
/*
* Remove the node now but someone else can add it back,
we will add it back at the end of reclaim to its correct
@@ -5945,6 +5957,7 @@ static int __init mem_cgroup_init(void)
node_online(node) ? node : NUMA_NO_NODE);
rtpn->rb_root = RB_ROOT;
+ rtpn->rb_rightmost = NULL;
spin_lock_init(&rtpn->lock);
soft_limit_tree.rb_tree_per_node[node] = rtpn;
}