author     David S. Miller <davem@davemloft.net>    2010-01-23 07:45:46 +0100
committer  David S. Miller <davem@davemloft.net>    2010-01-23 07:45:46 +0100
commit     6be325719b3e54624397e413efd4b33a997e55a3
tree       57f321a56794cab2222e179b16731e0d76a4a68a /arch/sh/mm
parent     be2net: fix bug in rx page posting
parent     Linux 2.6.33-rc5
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig      |  7
-rw-r--r--  arch/sh/mm/cache-sh4.c  |  3
-rw-r--r--  arch/sh/mm/cache.c      |  8
-rw-r--r--  arch/sh/mm/ioremap_32.c | 10
-rw-r--r--  arch/sh/mm/ioremap_64.c |  6
-rw-r--r--  arch/sh/mm/numa.c       | 15
6 files changed, 26 insertions, 23 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 0e7ba8e891cf..986a71b88ca3 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -82,8 +82,7 @@ config 32BIT
 
 config PMB_ENABLE
 	bool "Support 32-bit physical addressing through PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A
-	default y
+	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
@@ -96,7 +95,7 @@ choice
 
 config PMB
 	bool "PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A
+	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
@@ -104,7 +103,7 @@ config PMB
 
 config PMB_FIXED
 	bool "fixed PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A
+	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
 	select 32BIT
 	help
 	  If this option is enabled, fixed PMB mappings are inherited
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index f36a08bf3d5c..560ddb6bc8a7 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -256,8 +256,7 @@ static void sh4_flush_cache_page(void *args)
 		address = (unsigned long)vaddr;
 	}
 
-	if (pages_do_alias(address, phys))
-		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
 			(address & shm_align_mask), phys);
 
 	if (vma->vm_flags & VM_EXEC)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e9415d3ea94a..b8607fa7ae12 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -133,12 +133,8 @@ void __update_cache(struct vm_area_struct *vma,
 	page = pfn_to_page(pfn);
 	if (pfn_valid(pfn)) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			unsigned long addr = (unsigned long)page_address(page);
-
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_purge_region((void *)addr, PAGE_SIZE);
-		}
+		if (dirty)
+			__flush_purge_region(page_address(page), PAGE_SIZE);
 	}
 }
 
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index a86eaa9d75a5..2141befb4f91 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -33,10 +33,10 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			unsigned long flags)
+void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
+			       unsigned long flags, void *caller)
 {
-	struct vm_struct * area;
+	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
 	pgprot_t pgprot;
 
@@ -67,7 +67,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	/*
 	 * Ok, go for it..
 	 */
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
@@ -103,7 +103,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 
 	return (void __iomem *)(offset + (char *)orig_addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_caller);
 
 void __iounmap(void __iomem *addr)
 {
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
index b16843d02b76..ef434657d428 100644
--- a/arch/sh/mm/ioremap_64.c
+++ b/arch/sh/mm/ioremap_64.c
@@ -258,15 +258,15 @@ static void shmedia_unmapioaddr(unsigned long vaddr)
 	pte_clear(&init_mm, vaddr, ptep);
 }
 
-void __iomem *__ioremap(unsigned long offset, unsigned long size,
-			unsigned long flags)
+void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+			       unsigned long flags, void *caller)
 {
 	char name[14];
 
 	sprintf(name, "phys_%08x", (u32)offset);
 	return shmedia_alloc_io(offset, size, name, flags);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_caller);
 
 void __iounmap(void __iomem *virtual)
 {
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 6c524446c0f6..422e92721878 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -28,7 +28,7 @@ void __init setup_memory(void)
 {
 	unsigned long free_pfn = PFN_UP(__pa(_end));
 	u64 base = min_low_pfn << PAGE_SHIFT;
-	u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn;
+	u64 size = (max_low_pfn << PAGE_SHIFT) - base;
 
 	lmb_add(base, size);
 
@@ -38,6 +38,15 @@ void __init setup_memory(void)
 		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
 
 	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+	lmb_analyze();
+	lmb_dump_all();
+
+	/*
 	 * Node 0 sets up its pgdat at the first available pfn,
 	 * and bumps it up before setting up the bootmem allocator.
 	 */
@@ -71,7 +80,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 
 	/* Node-local pgdat */
 	NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
-					     SMP_CACHE_BYTES, end_pfn));
+					     SMP_CACHE_BYTES, end));
 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
 	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
@@ -81,7 +90,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 	/* Node-local bootmap */
 	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 	bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
-				       PAGE_SIZE, end_pfn);
+				       PAGE_SIZE, end);
 	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
 			  start_pfn, end_pfn);
 
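The rename from __ioremap() to __ioremap_caller() threads a caller pointer through to get_vm_area_caller(), so each I/O mapping is tagged with the call site that requested it. The compatibility shim that keeps existing two-argument __ioremap() users building lives in the arch headers and is not part of this diff; the following is only a minimal sketch of such a wrapper, assuming the __ioremap_caller() signature introduced above.

/* Declaration matching the renamed function in this diff. */
void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
			       unsigned long flags, void *caller);

/*
 * Hypothetical compatibility wrapper (not shown in this diff): callers keep
 * the old two-argument __ioremap() form, while the actual call site is
 * recorded via __builtin_return_address(0) and handed down to
 * get_vm_area_caller() inside __ioremap_caller().
 */
static inline void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	return __ioremap_caller(phys_addr, size, flags,
				__builtin_return_address(0));
}

Recording the caller in the vm_struct this way is what lets the mapping show up with a meaningful owner in /proc/vmallocinfo instead of a generic ioremap entry.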