author    zijun_hu <zijun_hu@htc.com>  2016-12-13 01:45:02 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 03:55:09 +0100
commit    8f6066049c54ef0f726869c27d610cef5d15e084 (patch)
tree      f51e74b3a7158fdffa0c9f37f82fffe8fe3d537b /mm
parent    kasan: turn on -fsanitize-address-use-after-scope (diff)
mm/percpu.c: fix panic falsely triggered by BUG_ON()
As shown by pcpu_build_alloc_info(), the number of units within a percpu
group is deduced by rounding up the number of CPUs within the group to the
@upa boundary. Therefore, when the CPU count is not aligned to @upa, it is
not equal to the group's unit count.

However, pcpu_page_first_chunk() uses BUG_ON() to assert that the two
numbers are equal, so the BUG_ON() can falsely trigger a panic.

To fix this, round the number of CPUs up before comparing it with the unit
count, and replace the BUG_ON() with a warning plus an error return, to
keep the system alive as much as possible.

Link: http://lkml.kernel.org/r/57FCF07C.2020103@zoho.com
Signed-off-by: zijun_hu <zijun_hu@htc.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
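To make the arithmetic concrete, below is a minimal userspace sketch of
the old and new checks. The CPU count and @upa values are made-up
examples, not taken from the commit:

#include <stdio.h>

/* same rounding that the kernel's roundup() macro performs */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	int num_possible_cpus = 6;	/* assumed example: 6 possible CPUs */
	int upa = 4;			/* assumed example: alloc_size/unit_size */
	int nr_g0_units = roundup(num_possible_cpus, upa);	/* = 8 */

	/* old check: BUG_ON(nr_units != num_possible_cpus) -> 8 != 6, panic */
	printf("old BUG_ON fires: %s\n",
	       nr_g0_units != num_possible_cpus ? "yes" : "no");

	/* fixed check compares against the rounded-up CPU count instead */
	printf("fixed check fails: %s\n",
	       nr_g0_units != roundup(num_possible_cpus, upa) ? "yes" : "no");
	return 0;
}

With these numbers the group holds 8 units for 6 CPUs, so the old
assertion would panic even though the layout is valid; the new comparison
only fails on a genuine inconsistency.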
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu.c | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 255714302394..f696385bcc44 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2093,6 +2093,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 	size_t pages_size;
 	struct page **pages;
 	int unit, i, j, rc;
+	int upa;
+	int nr_g0_units;
 
 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
@@ -2100,7 +2102,12 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 	if (IS_ERR(ai))
 		return PTR_ERR(ai);
 	BUG_ON(ai->nr_groups != 1);
-	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
+	upa = ai->alloc_size/ai->unit_size;
+	nr_g0_units = roundup(num_possible_cpus(), upa);
+	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
+		pcpu_free_alloc_info(ai);
+		return -EINVAL;
+	}
 
 	unit_pages = ai->unit_size >> PAGE_SHIFT;
@@ -2111,21 +2118,22 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 
 	/* allocate pages */
 	j = 0;
-	for (unit = 0; unit < num_possible_cpus(); unit++)
+	for (unit = 0; unit < num_possible_cpus(); unit++) {
+		unsigned int cpu = ai->groups[0].cpu_map[unit];
 		for (i = 0; i < unit_pages; i++) {
-			unsigned int cpu = ai->groups[0].cpu_map[unit];
 			void *ptr;
 
 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
 			if (!ptr) {
 				pr_warn("failed to allocate %s page for cpu%u\n",
-						psize_str, cpu);
+					psize_str, cpu);
 				goto enomem;
 			}
 			/* kmemleak tracks the percpu allocations separately */
 			kmemleak_free(ptr);
 			pages[j++] = virt_to_page(ptr);
 		}
+	}
 
 	/* allocate vm area, map the pages and copy static data */
 	vm.flags = VM_ALLOC;