Diffstat (limited to 'mm/sparse.c')
 mm/sparse.c | 53 ++++++++++++-----------------------------------------
 1 file changed, 16 insertions(+), 37 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 952f06d8f373..cb3bfae64036 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -427,7 +427,8 @@ static unsigned long __init section_map_size(void)
 }
 
 struct page __init *__populate_section_memmap(unsigned long pfn,
-		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+		struct dev_pagemap *pgmap)
 {
 	unsigned long size = section_map_size();
 	struct page *map = sparse_buffer_alloc(size);
@@ -524,7 +525,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
 			break;
 
 		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
-				nid, NULL);
+				nid, NULL, NULL);
 		if (!map) {
 			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
 			       __func__, nid);
@@ -629,9 +630,10 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 static struct page * __meminit populate_section_memmap(unsigned long pfn,
-		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+		struct dev_pagemap *pgmap)
 {
-	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
+	return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
 }
 
 static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
@@ -700,7 +702,8 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 }
 #else
 struct page * __meminit populate_section_memmap(unsigned long pfn,
-		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+		struct dev_pagemap *pgmap)
 {
 	return kvmalloc_node(array_size(sizeof(struct page),
 					PAGES_PER_SECTION), GFP_KERNEL, nid);
@@ -823,7 +826,8 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 }
 
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
+		unsigned long nr_pages, struct vmem_altmap *altmap,
+		struct dev_pagemap *pgmap)
 {
 	struct mem_section *ms = __pfn_to_section(pfn);
 	struct mem_section_usage *usage = NULL;
@@ -855,7 +859,7 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
 	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
 		return pfn_to_page(pfn);
 
-	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
+	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
 	if (!memmap) {
 		section_deactivate(pfn, nr_pages, altmap);
 		return ERR_PTR(-ENOMEM);
@@ -869,7 +873,8 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
  * @nid: The node to add section on
  * @start_pfn: start pfn of the memory range
  * @nr_pages: number of pfns to add in the section
- * @altmap: device page map
+ * @altmap: alternate pfns to allocate the memmap backing store
+ * @pgmap: alternate compound page geometry for devmap mappings
  *
  * This is only intended for hotplug.
 *
@@ -883,7 +888,8 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
 *
 * -ENOMEM	- Out of memory.
 */
 int __meminit sparse_add_section(int nid, unsigned long start_pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
+		unsigned long nr_pages, struct vmem_altmap *altmap,
+		struct dev_pagemap *pgmap)
 {
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
@@ -894,7 +900,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 	if (ret < 0)
 		return ret;
 
-	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
+	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
 	if (IS_ERR(memmap))
 		return PTR_ERR(memmap);
 
@@ -916,33 +922,6 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 	return 0;
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
-static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
-{
-	int i;
-
-	/*
-	 * A further optimization is to have per section refcounted
-	 * num_poisoned_pages.  But that would need more space per memmap, so
-	 * for now just do a quick global check to speed up this routine in the
-	 * absence of bad pages.
-	 */
-	if (atomic_long_read(&num_poisoned_pages) == 0)
-		return;
-
-	for (i = 0; i < nr_pages; i++) {
-		if (PageHWPoison(&memmap[i])) {
-			num_poisoned_pages_dec();
-			ClearPageHWPoison(&memmap[i]);
-		}
-	}
-}
-#else
-static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
-{
-}
-#endif
-
 void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
 		unsigned long nr_pages, unsigned long map_offset,
 		struct vmem_altmap *altmap)
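
The signature changes above all serve one threading job: a struct dev_pagemap pointer now travels alongside the vmem_altmap from sparse_add_section() through section_activate() into populate_section_memmap(), where vmemmap population code can consult the device's compound page geometry. A minimal caller-side sketch of the new convention follows; the helper is hypothetical and not part of this diff, and it assumes the existing pgmap_altmap() accessor from include/linux/memremap.h:

/*
 * Hypothetical illustration of the post-patch calling convention.
 * Plain memory hotplug passes NULL for both device arguments;
 * a ZONE_DEVICE caller hands in its pgmap.
 */
static int example_add_section(int nid, unsigned long start_pfn,
			       unsigned long nr_pages,
			       struct dev_pagemap *pgmap)
{
	/* NULL pgmap (ordinary hotplug) implies NULL altmap as well */
	struct vmem_altmap *altmap = pgmap ? pgmap_altmap(pgmap) : NULL;

	/* pgmap is threaded down to populate_section_memmap() */
	return sparse_add_section(nid, start_pfn, nr_pages, altmap, pgmap);
}

Within mm/sparse.c itself the new argument is only plumbed through (the early-boot path in sparse_init_nid() passes NULL), so this diff changes no behavior until a caller supplies a non-NULL pgmap.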