author     Linus Torvalds <torvalds@linux-foundation.org>    2021-07-02 02:17:24 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>    2021-07-02 02:17:24 +0200
commit     e267992f9ef0bf717d70a9ee18049782f77e4b3a
tree       6caf3664452672f41e8039f6af4279e2df709d66 /mm/percpu-vm.c
parent     Merge tag 'mips_5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/l...
parent     percpu: optimize locking in pcpu_balance_workfn()
Merge branch 'for-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu
Pull percpu updates from Dennis Zhou:
- percpu chunk depopulation: depopulate the backing pages of chunks
  that hold empty pages once the global count of empty populated pages
  exceeds a threshold even without counting those pages. This lets us
  reclaim a portion of memory that would previously be lost until the
  full chunk was freed (possibly never).

- memcg accounting cleanup: previously, separate chunks were managed
  for normal allocations and for __GFP_ACCOUNT allocations. These are
  now consolidated, which cleans up the code quite a bit (a sketch of
  the consolidated scheme follows this list).

- a few miscellaneous cleanups for clang warnings
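To make the memcg consolidation concrete, here is a minimal standalone
sketch, not the kernel code: rather than keeping a second set of chunks
for __GFP_ACCOUNT allocations, each chunk lazily carries a side array
mapping allocation offsets to an accounting cookie. The names
obj_cgroups and PCPU_MIN_ALLOC_SHIFT mirror the kernel; the types, the
chunk_account() helper, and the constants' use here are stand-ins for
illustration only.

    #include <stdio.h>
    #include <stdlib.h>

    #define PCPU_MIN_ALLOC_SHIFT 2          /* smallest allocation unit: 4 bytes */

    struct obj_cgroup;                      /* opaque accounting cookie */

    struct chunk {
            size_t size;                    /* chunk size in bytes */
            struct obj_cgroup **obj_cgroups;/* NULL until first accounted alloc */
    };

    /* record the cookie for an accounted allocation at byte offset @off */
    static int chunk_account(struct chunk *c, size_t off,
                             struct obj_cgroup *objcg)
    {
            if (!c->obj_cgroups) {
                    /* lazily allocate the side array on first accounted use */
                    c->obj_cgroups = calloc(c->size >> PCPU_MIN_ALLOC_SHIFT,
                                            sizeof(*c->obj_cgroups));
                    if (!c->obj_cgroups)
                            return -1;
            }
            c->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
            return 0;
    }

    int main(void)
    {
            struct chunk c = { .size = 64, .obj_cgroups = NULL };
            struct obj_cgroup *cookie = (struct obj_cgroup *)&c; /* dummy */

            /* accounted and unaccounted allocations now share one chunk:
             * only offsets with a recorded cookie are charged to a cgroup */
            chunk_account(&c, 8, cookie);
            printf("offset 8 accounted: %s\n",
                   c.obj_cgroups[8 >> PCPU_MIN_ALLOC_SHIFT] ? "yes" : "no");
            free(c.obj_cgroups);
            return 0;
    }

The point of the design is that a chunk pays for the side array only if
an accounted allocation actually lands in it, so unaccounted-only
workloads see no extra footprint.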
* 'for-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
percpu: optimize locking in pcpu_balance_workfn()
percpu: initialize best_upa variable
percpu: rework memcg accounting
mm, memcg: introduce mem_cgroup_kmem_disabled()
mm, memcg: mark cgroup_memory_nosocket, nokmem and noswap as __ro_after_init
percpu: make symbol 'pcpu_free_slot' static
percpu: implement partial chunk depopulation
percpu: use pcpu_free_slot instead of pcpu_nr_slots - 1
percpu: factor out pcpu_check_block_hint()
percpu: split __pcpu_balance_workfn()
percpu: fix a comment about the chunks ordering
Diffstat (limited to 'mm/percpu-vm.c')
-rw-r--r--    mm/percpu-vm.c    35
1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 8d3844bc0c7c..ee5d89fcd66f 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -329,13 +329,12 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
 	pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
-					    gfp_t gfp)
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 {
 	struct pcpu_chunk *chunk;
 	struct vm_struct **vms;
 
-	chunk = pcpu_alloc_chunk(type, gfp);
+	chunk = pcpu_alloc_chunk(gfp);
 	if (!chunk)
 		return NULL;
 
@@ -378,3 +377,33 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
 	/* no extra restriction */
 	return 0;
 }
+
+/**
+ * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim
+ * @chunk: chunk of interest
+ *
+ * This is the entry point for percpu reclaim. If a chunk qualifies, it is then
+ * isolated and managed in separate lists at the back of pcpu_slot: sidelined
+ * and to_depopulate respectively. The to_depopulate list holds chunks slated
+ * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once
+ * they are on this list. Once depopulated, they are moved onto the sidelined
+ * list which enables them to be pulled back in for allocation if no other chunk
+ * can suffice the allocation.
+ */
+static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
+{
+	/* do not reclaim either the first chunk or reserved chunk */
+	if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk)
+		return false;
+
+	/*
+	 * If it is isolated, it may be on the sidelined list so move it back to
+	 * the to_depopulate list. If we hit at least 1/4 pages empty pages AND
+	 * there is no system-wide shortage of empty pages aside from this
+	 * chunk, move it to the to_depopulate list.
+	 */
+	return ((chunk->isolated && chunk->nr_empty_pop_pages) ||
+		(pcpu_nr_empty_pop_pages >
+		 (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) &&
+		 chunk->nr_empty_pop_pages >= chunk->nr_pages / 4));
+}
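To see how the new predicate behaves with concrete numbers, here is a
standalone walk-through: a sketch, not kernel code. It assumes
PCPU_EMPTY_POP_PAGES_HIGH is 4 (the value in mm/percpu-internal.h
around the time of this merge), stubs only the fields the check reads,
and omits the first/reserved-chunk guard since those globals are not
modeled here.

    #include <stdbool.h>
    #include <stdio.h>

    #define PCPU_EMPTY_POP_PAGES_HIGH 4     /* assumed kernel watermark */

    static int pcpu_nr_empty_pop_pages = 10;/* system-wide empty pages */

    struct pcpu_chunk {                     /* only what the check reads */
            bool isolated;
            int nr_empty_pop_pages;
            int nr_pages;
    };

    /* the same condition as the hunk above, lifted out for experimentation */
    static bool pcpu_should_reclaim_chunk(const struct pcpu_chunk *chunk)
    {
            return (chunk->isolated && chunk->nr_empty_pop_pages) ||
                   (pcpu_nr_empty_pop_pages >
                    (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) &&
                    chunk->nr_empty_pop_pages >= chunk->nr_pages / 4);
    }

    int main(void)
    {
            /* 3 of 8 pages empty meets the 1/4 floor, and the other
             * chunks still hold 10 - 3 = 7 empty pages, above the
             * watermark of 4, so depopulating this chunk cannot
             * starve the rest of the system */
            struct pcpu_chunk c = { .isolated = false,
                                    .nr_empty_pop_pages = 3,
                                    .nr_pages = 8 };

            printf("reclaim: %s\n",
                   pcpu_should_reclaim_chunk(&c) ? "yes" : "no");
            return 0;
    }

Note how both legs of the && must hold: a chunk that is mostly empty is
still kept populated if its pages are the only slack the allocator has
left, which is what keeps depopulation from fighting the atomic
allocation path.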