author	Dennis Zhou (Facebook) <dennisszhou@gmail.com>	2017-07-25 01:02:04 +0200
committer	Tejun Heo <tj@kernel.org>	2017-07-26 16:23:52 +0200
commit	0c4169c3d11722a26773bdc0144c97fadd47d905 (patch)
tree	112f559a8d294f51ef59eaebbcb06bbcd267b870 /mm
parent	percpu: end chunk area maps page aligned for the populated bitmap (diff)
percpu: setup_first_chunk rename schunk/dchunk to chunk
There is no need to have the static chunk and dynamic chunk be named separately as the allocations are sequential. This preemptively solves the misnomer problem with the base_addrs being moved up in the following patch. It also removes a ternary operation deciding the first chunk.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
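The ternary being removed is GCC's conditional with an omitted middle operand (`x ?: y`), which evaluates to `x` when `x` is non-NULL/non-zero and to `y` otherwise. Below is a minimal standalone sketch of that pattern, using hypothetical pointer values rather than the real percpu structures; it builds as a GNU C extension with gcc or clang:

```c
#include <stdio.h>

int main(void)
{
	void *schunk = (void *)0x1000;	/* stand-in for the static chunk */
	void *dchunk = NULL;		/* no dynamic chunk was allocated */

	/*
	 * GNU ?: shorthand: picks dchunk if it is non-NULL, otherwise
	 * falls back to schunk.  This mirrors the old
	 * "pcpu_first_chunk = dchunk ?: schunk;" that the patch drops.
	 */
	void *first = dchunk ?: schunk;

	printf("first chunk at %p\n", first);
	return 0;
}
```

After the rename, a single `chunk` variable already holds the dynamic chunk whenever one was allocated (and the static/reserved chunk otherwise), so the fallback selection becomes unnecessary.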
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 1d2c980fde3f..e08ed61ea70a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1602,7 +1602,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
 	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
-	struct pcpu_chunk *schunk, *dchunk = NULL;
+	struct pcpu_chunk *chunk;
 	unsigned long *group_offsets;
 	size_t *group_sizes;
 	unsigned long *unit_off;
@@ -1720,22 +1720,22 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 */
 	start_offset = ai->static_size;
 	map_size = ai->reserved_size ?: ai->dyn_size;
-	schunk = pcpu_alloc_first_chunk(base_addr, start_offset, map_size,
-					smap, ARRAY_SIZE(smap));
+	chunk = pcpu_alloc_first_chunk(base_addr, start_offset, map_size, smap,
+				       ARRAY_SIZE(smap));
 	/* init dynamic chunk if necessary */
 	if (ai->reserved_size) {
-		pcpu_reserved_chunk = schunk;
+		pcpu_reserved_chunk = chunk;
 		start_offset = ai->static_size + ai->reserved_size;
 		map_size = ai->dyn_size;
-		dchunk = pcpu_alloc_first_chunk(base_addr, start_offset,
-						map_size, dmap,
-						ARRAY_SIZE(dmap));
+		chunk = pcpu_alloc_first_chunk(base_addr, start_offset,
+					       map_size, dmap,
+					       ARRAY_SIZE(dmap));
 	}
 	/* link the first chunk in */
-	pcpu_first_chunk = dchunk ?: schunk;
+	pcpu_first_chunk = chunk;
 	i = (pcpu_first_chunk->start_offset) ? 1 : 0;
 	pcpu_nr_empty_pop_pages +=
 		pcpu_count_occupied_pages(pcpu_first_chunk, i);