summaryrefslogtreecommitdiffstats
path: root/mm/percpu.c
diff options
context:
space:
mode:
author: Dennis Zhou (Facebook) <dennisszhou@gmail.com> 2017-07-25 01:02:07 +0200
committer: Tejun Heo <tj@kernel.org> 2017-07-26 16:23:53 +0200
commit: 8ab16c43ea79098f4126432c6b199a5d6ba24b6d (patch)
tree: 60ae1a3d886790e05663a394e926a3f5fcd12d9b /mm/percpu.c
parent: percpu: combine percpu address checks (diff)
downloadlinux-8ab16c43ea79098f4126432c6b199a5d6ba24b6d.tar.xz
linux-8ab16c43ea79098f4126432c6b199a5d6ba24b6d.zip
percpu: change the number of pages marked in the first_chunk pop bitmap
The populated bitmap represents the state of the pages the chunk serves. Previously, the bitmap was marked completely used because the first chunk was allocated and immutable. This is misleading because the first chunk may not be completely filled. Additionally, with the base_addr moved up in the previous patch, the population check no longer corresponds to what was being checked. This patch modifies the population map to cover only the number of pages the region serves, and makes the population check correspond correctly again. The change removes any mismatch between the declared size of the populated bitmap and its actual size. The work function page iterators now use nr_pages for the check rather than pcpu_unit_pages, because nr_populated is now chunk specific. Without this, the work function would try to populate the remainder of these chunks despite them serving no more than nr_pages when nr_pages is set less than pcpu_unit_pages. Signed-off-by: Dennis Zhou <dennisszhou@gmail.com> Reviewed-by: Josef Bacik <jbacik@fb.com> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm/percpu.c')
-rw-r--r-- mm/percpu.c | 16
1 file changed, 9 insertions, 7 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 5b1fcefdc386..773dafea181e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -737,7 +737,9 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
region_size = PFN_ALIGN(start_offset + map_size);
/* allocate chunk */
- chunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
+ BITS_TO_LONGS(region_size >> PAGE_SHIFT),
+ 0);
INIT_LIST_HEAD(&chunk->list);
INIT_LIST_HEAD(&chunk->map_extend_list);
@@ -746,15 +748,15 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
chunk->start_offset = start_offset;
chunk->end_offset = region_size - chunk->start_offset - map_size;
- chunk->nr_pages = pcpu_unit_pages;
+ chunk->nr_pages = region_size >> PAGE_SHIFT;
chunk->map = map;
chunk->map_alloc = init_map_size;
/* manage populated page bitmap */
chunk->immutable = true;
- bitmap_fill(chunk->populated, pcpu_unit_pages);
- chunk->nr_populated = pcpu_unit_pages;
+ bitmap_fill(chunk->populated, chunk->nr_pages);
+ chunk->nr_populated = chunk->nr_pages;
chunk->contig_hint = chunk->free_size = map_size;
@@ -1212,7 +1214,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
list_for_each_entry_safe(chunk, next, &to_free, list) {
int rs, re;
- pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
+ pcpu_for_each_pop_region(chunk, rs, re, 0, chunk->nr_pages) {
pcpu_depopulate_chunk(chunk, rs, re);
spin_lock_irq(&pcpu_lock);
pcpu_chunk_depopulated(chunk, rs, re);
@@ -1269,7 +1271,7 @@ retry_pop:
spin_lock_irq(&pcpu_lock);
list_for_each_entry(chunk, &pcpu_slot[slot], list) {
- nr_unpop = pcpu_unit_pages - chunk->nr_populated;
+ nr_unpop = chunk->nr_pages - chunk->nr_populated;
if (nr_unpop)
break;
}
@@ -1279,7 +1281,7 @@ retry_pop:
continue;
/* @chunk can't go away while pcpu_alloc_mutex is held */
- pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
+ pcpu_for_each_unpop_region(chunk, rs, re, 0, chunk->nr_pages) {
int nr = min(re - rs, nr_to_pop);
ret = pcpu_populate_chunk(chunk, rs, rs + nr);