author     Linus Torvalds <torvalds@linux-foundation.org>    2014-12-12 03:57:19 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-12-12 03:57:19 +0100
commit     2756d373a3f45a3a9ebf4ac389f9e0e02bd35a93
tree       e248c5adccb3045f96b3cfe0a1ffeb37bb81e4cb /mm
parent     Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/l...
parent     cgroup: implement cgroup_get_e_css()
Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup update from Tejun Heo:
"cpuset got simplified a bit. cgroup core got a fix on unified
hierarchy and grew some effective css related interfaces which will be
used for blkio support for writeback IO traffic which is currently
being worked on"
* 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
cgroup: implement cgroup_get_e_css()
cgroup: add cgroup_subsys->css_e_css_changed()
cgroup: add cgroup_subsys->css_released()
cgroup: fix the async css offline wait logic in cgroup_subtree_control_write()
cgroup: restructure child_subsys_mask handling in cgroup_subtree_control_write()
cgroup: separate out cgroup_calc_child_subsys_mask() from cgroup_refresh_child_subsys_mask()
cpuset: lock vs unlock typo
cpuset: simplify cpuset_node_allowed API
cpuset: convert callback_mutex to a spinlock
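
The "simplify cpuset_node_allowed API" item is what drives every mm/ hunk below: the separate cpuset_zone_allowed_softwall()/cpuset_zone_allowed_hardwall() helpers collapse into a single cpuset_zone_allowed(), with hardwall behaviour requested through __GFP_HARDWALL in the gfp mask. A minimal caller-side sketch of that convention, in kernel-internal C; the wrapper function and its name are illustrative only, not part of this series:

    #include <linux/cpuset.h>
    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    /* Illustrative helper (not from this patch set): select hardwall vs.
     * softwall checking through the gfp mask instead of calling a
     * differently named cpuset helper. */
    static bool zone_usable_for_alloc(struct zone *zone, gfp_t gfp_mask,
                                      bool hardwall)
    {
            if (hardwall)
                    gfp_mask |= __GFP_HARDWALL;     /* hardwall semantics */
            /* without __GFP_HARDWALL this is the old softwall check */
            return cpuset_zone_allowed(zone, gfp_mask);
    }

This mirrors the conversions in the diff: softwall callers pass their gfp mask unchanged, while former hardwall callers OR in __GFP_HARDWALL.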
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    | 2 +-
-rw-r--r--  mm/oom_kill.c   | 2 +-
-rw-r--r--  mm/page_alloc.c | 6 +++---
-rw-r--r--  mm/slab.c       | 2 +-
-rw-r--r--  mm/slub.c       | 3 ++-
-rw-r--r--  mm/vmscan.c     | 5 +++--
6 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 30cd96879152..919b86a2164d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -582,7 +582,7 @@ retry_cpuset:
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						MAX_NR_ZONES - 1, nodemask) {
-		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
+		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 			if (page) {
 				if (avoid_reserve)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3b014d326151..864bba992735 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -233,7 +233,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 	/* Check this allocation failure is caused by cpuset's wall function */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 			high_zoneidx, nodemask)
-		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+		if (!cpuset_zone_allowed(zone, gfp_mask))
 			cpuset_limited = true;
 
 	if (cpuset_limited) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7198c065999..df542feaac3b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1990,7 +1990,7 @@ zonelist_scan:
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
-	 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
 	 */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						high_zoneidx, nodemask) {
@@ -2001,7 +2001,7 @@ zonelist_scan:
 				continue;
 		if (cpusets_enabled() &&
 			(alloc_flags & ALLOC_CPUSET) &&
-			!cpuset_zone_allowed_softwall(zone, gfp_mask))
+			!cpuset_zone_allowed(zone, gfp_mask))
 				continue;
 		/*
 		 * Distribute pages in proportion to the individual
@@ -2529,7 +2529,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 			alloc_flags |= ALLOC_HARDER;
 		/*
 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
-		 * comment for __cpuset_node_allowed_softwall().
+		 * comment for __cpuset_node_allowed().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
diff --git a/mm/slab.c b/mm/slab.c
index 79e15f0a2a6e..fee275b5b6b7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3015,7 +3015,7 @@ retry:
 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 		nid = zone_to_nid(zone);
 
-		if (cpuset_zone_allowed_hardwall(zone, flags) &&
+		if (cpuset_zone_allowed(zone, flags | __GFP_HARDWALL) &&
 			get_node(cache, nid) &&
 			get_node(cache, nid)->free_objects) {
 				obj = ____cache_alloc_node(cache,
diff --git a/mm/slub.c b/mm/slub.c
index 386bbed76e94..765c5884d03d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1665,7 +1665,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 			n = get_node(s, zone_to_nid(zone));
 
-			if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+			if (n && cpuset_zone_allowed(zone,
+					flags | __GFP_HARDWALL) &&
 					n->nr_partial > s->min_partial) {
 				object = get_partial_node(s, n, c, flags);
 				if (object) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4636d9e822c1..a384339bf718 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2405,7 +2405,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		 * to global LRU.
 		 */
 		if (global_reclaim(sc)) {
-			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+			if (!cpuset_zone_allowed(zone,
+						 GFP_KERNEL | __GFP_HARDWALL))
 				continue;
 
 			lru_pages += zone_reclaimable_pages(zone);
@@ -3388,7 +3389,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	if (!populated_zone(zone))
 		return;
 
-	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
 		return;
 	pgdat = zone->zone_pgdat;
 	if (pgdat->kswapd_max_order < order) {
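
Beyond the mm/ conversion above, the pull also brings in cgroup_get_e_css() for effective-css lookups, which the message says will be used for blkio support of writeback IO. A hedged sketch of the call pattern, kernel-internal C; the surrounding function and the choice of memory_cgrp_subsys are illustrative assumptions (not taken from this merge, and assuming CONFIG_MEMCG):

    #include <linux/cgroup.h>

    /* Hypothetical helper, for illustration only: pin the effective css
     * of a subsystem for @cgrp, consult it, then drop the reference. */
    static void inspect_effective_css(struct cgroup *cgrp)
    {
            struct cgroup_subsys_state *css;

            /* cgroup_get_e_css() returns the effective css with a
             * reference held; it must be released with css_put(). */
            css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);

            /* ... consult css here, e.g. to attribute writeback IO ... */

            css_put(css);
    }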