path: root/mm/percpu-vm.c
author	Nicholas Piggin <npiggin@gmail.com>	2021-04-30 07:58:53 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-30 20:20:40 +0200
commit	b67177ecd956333029dbc1a4971a857fee0ccbb1 (patch)
tree	0cd48f934f59d2909303256c93f5dec1c87c1481 /mm/percpu-vm.c
parent	mm/vmalloc: hugepage vmalloc mappings (diff)
mm/vmalloc: remove map_kernel_range
Patch series "mm/vmalloc: cleanup after hugepage series", v2.

Christoph pointed out some overdue cleanups required after the huge
vmalloc series, and I had another failure error message improvement as
well.

This patch (of 5):

This is a shim around vmap_pages_range, get rid of it.

Move the main API comment from the _noflush variant to the normal
variant, and make _noflush internal to mm/.

Link: https://lkml.kernel.org/r/20210322021806.892164-1-npiggin@gmail.com
Link: https://lkml.kernel.org/r/20210322021806.892164-2-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
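For context, the removed function really was just a shim: it translated its
(addr, size) interface into the (start, end, page_shift) form taken by
vmap_pages_range_noflush(). A simplified sketch of the pre-removal
mm/vmalloc.c definition (illustrative, not character-exact):

	/*
	 * Shim removed by this patch (sketch): converts a length-based
	 * interface into the start/end + page_shift interface.
	 */
	int map_kernel_range_noflush(unsigned long addr, unsigned long size,
				     pgprot_t prot, struct page **pages)
	{
		return vmap_pages_range_noflush(addr, addr + size, prot, pages,
						PAGE_SHIFT);
	}

Every caller can therefore be converted mechanically by passing addr + size
as the end address and PAGE_SHIFT as the mapping granularity, which is
exactly the conversion applied to __pcpu_map_pages() in the diff below.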
Diffstat (limited to 'mm/percpu-vm.c')
-rw-r--r--	mm/percpu-vm.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index e46f7a6917f9..88a53eb68a94 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -8,6 +8,7 @@
* Chunks are mapped into vmalloc areas and populated page by page.
* This is the default chunk allocator.
*/
+#include "internal.h"
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
unsigned int cpu, int page_idx)
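The new #include "internal.h" is what pulls in the declaration of
vmap_pages_range_noflush() now that the _noflush variant is private to mm/.
Roughly, the declaration this series places in mm/internal.h looks like the
following (approximate sketch, matching the call signature used below):

	/* Declared in mm/internal.h once _noflush becomes mm-internal: */
	int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
			pgprot_t prot, struct page **pages,
			unsigned int page_shift);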
@@ -192,8 +193,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
int nr_pages)
{
- return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
- PAGE_KERNEL, pages);
+ return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
+ PAGE_KERNEL, pages, PAGE_SHIFT);
}
/**
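With the shim gone, the split of responsibilities is: the _noflush variant
(internal to mm/) installs the page-table entries, and the flushing wrapper
in mm/vmalloc.c performs a single cache flush over the whole range
afterwards. A simplified sketch of that wrapper as it stands after this
series (illustrative reconstruction, not the exact merged source):

	/*
	 * Flushing wrapper (sketch): map the pages without flushing,
	 * then do one cache flush over the whole mapped range.
	 */
	static int vmap_pages_range(unsigned long addr, unsigned long end,
			pgprot_t prot, struct page **pages,
			unsigned int page_shift)
	{
		int err;

		err = vmap_pages_range_noflush(addr, end, prot, pages,
					       page_shift);
		flush_cache_vmap(addr, end);
		return err;
	}

Callers like __pcpu_map_pages() above use the _noflush variant directly
because the percpu allocator batches its own flush (see
pcpu_post_map_flush() in this file).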