author		Heiko Carstens <heiko.carstens@de.ibm.com>	2008-04-30 13:38:47 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-04-30 13:38:48 +0200
commit		17f345808563d2f425b2b15d60c4a5b00112e9eb (patch)
tree		e12fe48f44c5d4d50cf1e92e679bc1badea0623a /arch
parent		[S390] System z large page support. (diff)
[S390] Convert to SPARSEMEM & SPARSEMEM_VMEMMAP
Convert s390 to SPARSEMEM and SPARSEMEM_VMEMMAP. SPARSEMEM_VMEMMAP is
normally user-configurable, so we force it on with a Kconfig select:
plain SPARSEMEM without SPARSEMEM_VMEMMAP leaves us with a mess of
broken include dependencies that we don't want to fix.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
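
A note for context, not part of the patch: the payoff of SPARSEMEM_VMEMMAP is
that the memmap becomes a single virtually contiguous array, so the generic
pfn_to_page()/page_to_pfn() helpers collapse into pointer arithmetic. A minimal
sketch, roughly what include/asm-generic/memory_model.h does under
CONFIG_SPARSEMEM_VMEMMAP in kernels of this era:

	/*
	 * Sketch of the vmemmap memory model: the memmap is virtually
	 * contiguous, so pfn/page conversion is plain pointer arithmetic.
	 * "vmemmap" is the base of the virtual memmap array; on s390 that
	 * area is backed on demand by the new vmemmap_populate() below.
	 */
	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)

This is what makes the hand-rolled per-chunk memmap_init() in
arch/s390/mm/vmem.c unnecessary; the patch deletes it.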
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/Kconfig	 8
-rw-r--r--	arch/s390/mm/extmem.c	 8
-rw-r--r--	arch/s390/mm/init.c	 2
-rw-r--r--	arch/s390/mm/vmem.c	81
4 files changed, 20 insertions(+), 79 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8f5f02160ffc..29a7940f284f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -300,6 +300,14 @@ comment "Kernel preemption"
 
 source "kernel/Kconfig.preempt"
 
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	select SPARSEMEM_VMEMMAP_ENABLE
+	select SPARSEMEM_VMEMMAP
+
+config ARCH_SPARSEMEM_DEFAULT
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index ed2af0a3303b..f231f5ec74b6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -287,7 +287,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	if (rc < 0)
 		goto out_free;
 
-	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 	if (rc)
 		goto out_free;
@@ -351,7 +351,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	release_resource(seg->res);
 	kfree(seg->res);
 out_shared:
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
 	kfree(seg);
 out:
@@ -474,7 +474,7 @@ segment_modify_shared (char *name, int do_nonshared)
 	rc = 0;
 	goto out_unlock;
 out_del:
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
 	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	kfree(seg);
@@ -508,7 +508,7 @@ segment_unload(char *name)
 		goto out_unlock;
 	release_resource(seg->res);
 	kfree(seg->res);
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
 	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	kfree(seg);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index acc92f46a096..fa31de6ae97a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -106,6 +106,8 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 97bce6c97574..beccacf907f3 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,43 +27,6 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn)
-{
-	struct page *start, *end;
-	struct page *map_start, *map_end;
-	int i;
-
-	start = pfn_to_page(start_pfn);
-	end = start + size;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long cstart, cend;
-
-		cstart = PFN_DOWN(memory_chunk[i].addr);
-		cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
-		map_start = mem_map + cstart;
-		map_end = mem_map + cend;
-
-		if (map_start < start)
-			map_start = start;
-		if (map_end > end)
-			map_end = end;
-
-		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
-			/ sizeof(struct page);
-		map_end += ((PFN_ALIGN((unsigned long) map_end)
-			     - (unsigned long) map_end)
-			    / sizeof(struct page));
-
-		if (map_start < map_end)
-			memmap_init_zone((unsigned long)(map_end - map_start),
-					 nid, zone, page_to_pfn(map_start),
-					 MEMMAP_EARLY);
-	}
-}
-
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
@@ -115,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
 	unsigned long address;
 	pgd_t *pg_dir;
@@ -209,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
 	unsigned long address, start_addr, end_addr;
-	struct page *map_start, *map_end;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -220,11 +182,8 @@
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = VMEM_MAP + PFN_DOWN(start);
-	map_end = VMEM_MAP + PFN_DOWN(start + size);
-
-	start_addr = (unsigned long) map_start & PAGE_MASK;
-	end_addr = PFN_ALIGN((unsigned long) map_end);
+	start_addr = (unsigned long) start;
+	end_addr = (unsigned long) (start + nr);
 
 	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
@@ -268,16 +227,6 @@ out:
 	return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
-{
-	int ret;
-
-	ret = vmem_add_mem_map(start, size);
-	if (ret)
-		return ret;
-	return vmem_add_range(start, size, ro);
-}
-
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -315,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
 	vmem_remove_range(seg->start, seg->size);
 }
 
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
 	int ret;
@@ -339,11 +288,9 @@ out:
 	return ret;
 }
 
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
-	struct page *page;
-	unsigned long pfn, num_pfn, end_pfn;
 	int ret;
 
 	mutex_lock(&vmem_mutex);
@@ -361,21 +308,6 @@
 	ret = vmem_add_mem(start, size, 0);
 	if (ret)
 		goto out_remove;
-
-	pfn = PFN_DOWN(start);
-	num_pfn = PFN_DOWN(size);
-	end_pfn = pfn + num_pfn;
-
-	page = pfn_to_page(pfn);
-	memset(page, 0, num_pfn * sizeof(struct page));
-
-	for (; pfn < end_pfn; pfn++) {
-		page = pfn_to_page(pfn);
-		init_page_count(page);
-		reset_page_mapcount(page);
-		SetPageReserved(page);
-		INIT_LIST_HEAD(&page->lru);
-	}
 	goto out;
 
 out_remove:
@@ -401,7 +333,6 @@ void __init vmem_map_init(void)
 	INIT_LIST_HEAD(&init_mm.context.crst_list);
 	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
 	init_mm.context.noexec = 0;
-	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
 	ro_end = PFN_ALIGN((unsigned long)&_eshared);
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
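
For reference, and as an illustration rather than part of the patch: the caller
of the new vmemmap_populate() is the generic sparse-vmemmap code, which asks
the architecture to back one section's worth of the virtual memmap at a time.
A sketch of what mm/sparse-vmemmap.c does in kernels of this era:

	/*
	 * Sketch of the generic caller (mm/sparse-vmemmap.c): back the
	 * virtual memmap pages for one memory section with real memory.
	 */
	struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
	{
		struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
		int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);

		if (error)
			return NULL;
		return map;
	}

This is why vmemmap_populate() takes a struct page pointer and a page count
rather than the physical address range that vmem_add_mem_map() took: it
receives a slice of the virtual memmap to back, not a memory range to describe.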