author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-04-18 21:52:50 +0200
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-04-18 21:52:50 +0200
commit     681e4a5e13c1c8315694eb4f44e0cdd84c9082d2
tree       699f14527c118859026e8ce0214e689d0b9c88cb  /arch/mips/mm
parent     xen/p2m: m2p_find_override: use list_for_each_entry_safe
parent     Merge branch 'for-3.4/drivers' of git://git.kernel.dk/linux-block
Merge commit 'c104f1fa1ecf4ee0fc06e31b1f77630b2551be81' into stable/for-linus-3.4
* commit 'c104f1fa1ecf4ee0fc06e31b1f77630b2551be81': (14566 commits)
cpufreq: OMAP: fix build errors: depends on ARCH_OMAP2PLUS
sparc64: Eliminate obsolete __handle_softirq() function
sparc64: Fix bootup crash on sun4v.
kconfig: delete last traces of __enabled_ from autoconf.h
Revert "kconfig: fix __enabled_ macros definition for invisible and un-selected symbols"
kconfig: fix IS_ENABLED to not require all options to be defined
irq_domain: fix type mismatch in debugfs output format
staging: android: fix mem leaks in __persistent_ram_init()
staging: vt6656: Don't leak memory in drivers/staging/vt6656/ioctl.c::private_ioctl()
staging: iio: hmc5843: Fix crash in probe function.
panic: fix stack dump print on direct call to panic()
drivers/rtc/rtc-pl031.c: enable clock on all ST variants
Revert "mm: vmscan: fix misused nr_reclaimed in shrink_mem_cgroup_zone()"
hugetlb: fix race condition in hugetlb_fault()
drivers/rtc/rtc-twl.c: use static register while reading time
drivers/rtc/rtc-s3c.c: add placeholder for driver private data
drivers/rtc/rtc-s3c.c: fix compilation error
MAINTAINERS: add PCDP console maintainer
memcg: do not open code accesses to res_counter members
drivers/rtc/rtc-efi.c: fix section mismatch warning
...
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile       |   5
-rw-r--r--  arch/mips/mm/c-octeon.c     |   9
-rw-r--r--  arch/mips/mm/c-r3k.c        |   1
-rw-r--r--  arch/mips/mm/c-r4k.c        |   8
-rw-r--r--  arch/mips/mm/c-tx39.c       |   1
-rw-r--r--  arch/mips/mm/dma-default.c  |   8
-rw-r--r--  arch/mips/mm/fault.c        |  37
-rw-r--r--  arch/mips/mm/gup.c          | 315
-rw-r--r--  arch/mips/mm/highmem.c      |   4
-rw-r--r--  arch/mips/mm/init.c         |  17
-rw-r--r--  arch/mips/mm/page.c         |   1
-rw-r--r--  arch/mips/mm/sc-ip22.c      |   1
-rw-r--r--  arch/mips/mm/sc-mips.c      |   1
-rw-r--r--  arch/mips/mm/sc-r5k.c       |   1
-rw-r--r--  arch/mips/mm/tlb-r3k.c      |   2
-rw-r--r--  arch/mips/mm/tlb-r4k.c      |  69
-rw-r--r--  arch/mips/mm/tlb-r8k.c      |   1
-rw-r--r--  arch/mips/mm/tlbex.c        |   1
18 files changed, 390 insertions, 92 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 4d8c1623eee2..4aa20280613e 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,8 +3,8 @@
 #
 obj-y				+= cache.o dma-default.o extable.o fault.o \
-				   init.o mmap.o tlbex.o tlbex-fault.o uasm.o \
-				   page.o
+				   gup.o init.o mmap.o page.o tlbex.o \
+				   tlbex-fault.o uasm.o
 
 obj-$(CONFIG_32BIT)		+= ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)		+= pgtable-64.o
@@ -31,6 +31,7 @@ obj-$(CONFIG_CPU_TX49XX)	+= c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_VR41XX)	+= c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_CAVIUM_OCTEON)	+= c-octeon.o cex-oct.o tlb-r4k.o
 obj-$(CONFIG_CPU_XLR)		+= c-r4k.o tlb-r4k.o cex-gen.o
+obj-$(CONFIG_CPU_XLP)		+= c-r4k.o tlb-r4k.o cex-gen.o
 
 obj-$(CONFIG_IP22_CPU_SCACHE)	+= sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)	+= sc-r5k.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index daa81f7284ac..47037ec5589b 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -21,7 +21,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/r4kcache.h>
-#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
@@ -81,9 +80,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
 	if (vma)
 		mask = *mm_cpumask(vma->vm_mm);
 	else
-		mask = cpu_online_map;
-	cpu_clear(cpu, mask);
-	for_each_cpu_mask(cpu, mask)
+		mask = *cpu_online_mask;
+	cpumask_clear_cpu(cpu, &mask);
+	for_each_cpu(cpu, &mask)
 		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
 
 	preempt_enable();
@@ -223,7 +222,7 @@ static void __cpuinit probe_octeon(void)
 		break;
 
 	default:
-		panic("Unsupported Cavium Networks CPU type\n");
+		panic("Unsupported Cavium Networks CPU type");
 		break;
 	}
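The c-octeon.c hunk above is part of the tree-wide migration from the old cpu_online_map / cpu_clear() / for_each_cpu_mask() interfaces to the cpumask_* accessors. A minimal sketch of the new idiom in isolation — the helper name and IPI callback are hypothetical, not part of this commit:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical helper, for illustration only: send an IPI to every
 * online CPU except the current one. Assumes preemption is disabled,
 * as it is in the caller in the hunk above. */
static void ipi_all_other_online_cpus(void (*send_ipi)(int cpu))
{
	struct cpumask mask;
	int cpu = smp_processor_id();

	cpumask_copy(&mask, cpu_online_mask);	/* was: mask = cpu_online_map */
	cpumask_clear_cpu(cpu, &mask);		/* was: cpu_clear(cpu, mask) */
	for_each_cpu(cpu, &mask)		/* was: for_each_cpu_mask(cpu, mask) */
		send_ipi(cpu);
}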
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 0765583d0c92..031c4c2cdf2e 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -18,7 +18,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
-#include <asm/system.h>
 #include <asm/isadep.h>
 #include <asm/io.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a79fe9aa7721..bda8eb26ece7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -29,7 +29,6 @@
 #include <asm/pgtable.h>
 #include <asm/r4kcache.h>
 #include <asm/sections.h>
-#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
@@ -498,7 +497,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		if (map_coherent)
 			vaddr = kmap_coherent(page, addr);
 		else
-			vaddr = kmap_atomic(page, KM_USER0);
+			vaddr = kmap_atomic(page);
 		addr = (unsigned long)vaddr;
 	}
@@ -521,7 +520,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		if (map_coherent)
 			kunmap_coherent();
 		else
-			kunmap_atomic(vaddr, KM_USER0);
+			kunmap_atomic(vaddr);
 	}
 }
@@ -1235,6 +1234,9 @@ static void __cpuinit setup_scache(void)
 		loongson2_sc_init();
 		return;
 #endif
+	case CPU_XLP:
+		/* don't need to worry about L2, fully coherent */
+		return;
 
 	default:
 		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index a43c197ccf8c..87d23cada6d6 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -18,7 +18,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
-#include <asm/system.h>
 #include <asm/isadep.h>
 #include <asm/io.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 46084912e588..3fab2046c8a4 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -98,7 +98,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp)
+	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
@@ -132,7 +132,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 EXPORT_SYMBOL(dma_free_noncoherent);
 
 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
@@ -323,8 +323,8 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 EXPORT_SYMBOL(dma_cache_sync);
 
 static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc_coherent = mips_dma_alloc_coherent,
-	.free_coherent = mips_dma_free_coherent,
+	.alloc = mips_dma_alloc_coherent,
+	.free = mips_dma_free_coherent,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 937cf3368164..c14f6dfed995 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -22,7 +22,6 @@
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/highmem.h>		/* For VMALLOC_END */
@@ -42,6 +41,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 	const int field = sizeof(unsigned long) * 2;
 	siginfo_t info;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+						 (write ? FAULT_FLAG_WRITE : 0);
 
 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -91,6 +92,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -144,7 +146,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
@@ -153,12 +159,27 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-		tsk->maj_flt++;
-	} else {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-		tsk->min_flt++;
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+						  regs, address);
+			tsk->maj_flt++;
+		} else {
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+						  regs, address);
+			tsk->min_flt++;
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 
 	up_read(&mm->mmap_sem);
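Taken together, the fault.c hunks above adopt the now-standard mmap_sem retry protocol. Condensed into one place for readability — a sketch of the control flow only, with the vma lookup and error paths elided, and the function name hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

static void sketch_do_page_fault(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long address, int write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	/* ... find_vma() and access checks elided ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* A fatal signal cut the killable wait short; mmap_sem was
	 * already released inside the fault path, so just return. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* Retry exactly once; the second pass may block on the
		 * page lock. __lock_page_or_retry() dropped mmap_sem
		 * before returning, so no up_read() here. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);
}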
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
new file mode 100644
index 000000000000..33aadbcf170b
--- /dev/null
+++ b/arch/mips/mm/gup.c
@@ -0,0 +1,315 @@
+/*
+ * Lockless get_user_pages_fast for MIPS
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ * Copyright (C) 2011 Ralf Baechle
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/hugetlb.h>
+
+#include <asm/pgtable.h>
+
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+	pte_t pte;
+
+retry:
+	pte.pte_low = ptep->pte_low;
+	smp_rmb();
+	pte.pte_high = ptep->pte_high;
+	smp_rmb();
+	if (unlikely(pte.pte_low != ptep->pte_low))
+		goto retry;
+
+	return pte;
+#else
+	return ACCESS_ONCE(*ptep);
+#endif
+}
+
+static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
+			int write, struct page **pages, int *nr)
+{
+	pte_t *ptep = pte_offset_map(&pmd, addr);
+	do {
+		pte_t pte = gup_get_pte(ptep);
+		struct page *page;
+
+		if (!pte_present(pte) ||
+		    pte_special(pte) || (write && !pte_write(pte))) {
+			pte_unmap(ptep);
+			return 0;
+		}
+		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+		page = pte_page(pte);
+		get_page(page);
+		SetPageReferenced(page);
+		pages[*nr] = page;
+		(*nr)++;
+
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+
+	pte_unmap(ptep - 1);
+	return 1;
+}
+
+static inline void get_head_page_multiple(struct page *page, int nr)
+{
+	VM_BUG_ON(page != compound_head(page));
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_add(nr, &page->_count);
+	SetPageReferenced(page);
+}
+
+static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
+			int write, struct page **pages, int *nr)
+{
+	pte_t pte = *(pte_t *)&pmd;
+	struct page *head, *page;
+	int refs;
+
+	if (write && !pte_write(pte))
+		return 0;
+	/* hugepages are never "special" */
+	VM_BUG_ON(pte_special(pte));
+	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+	refs = 0;
+	head = pte_page(pte);
+	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		if (PageTail(page))
+			get_huge_page_tail(page);
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	get_head_page_multiple(head, refs);
+	return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+			int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_offset(&pud, addr);
+	do {
+		pmd_t pmd = *pmdp;
+
+		next = pmd_addr_end(addr, end);
+		/*
+		 * The pmd_trans_splitting() check below explains why
+		 * pmdp_splitting_flush has to flush the tlb, to stop
+		 * this gup-fast code from running while we set the
+		 * splitting bit in the pmd. Returning zero will take
+		 * the slow path that will call wait_split_huge_page()
+		 * if the pmd is still in splitting state. gup-fast
+		 * can't because it has irq disabled and
+		 * wait_split_huge_page() would never return as the
+		 * tlb flush IPI wouldn't run.
+		 */
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+			return 0;
+		if (unlikely(pmd_huge(pmd))) {
+			if (!gup_huge_pmd(pmd, addr, next, write, pages,nr))
+				return 0;
+		} else {
+			if (!gup_pte_range(pmd, addr, next, write, pages,nr))
+				return 0;
+		}
+	} while (pmdp++, addr = next, addr != end);
+
+	return 1;
+}
+
+static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
+			int write, struct page **pages, int *nr)
+{
+	pte_t pte = *(pte_t *)&pud;
+	struct page *head, *page;
+	int refs;
+
+	if (write && !pte_write(pte))
+		return 0;
+	/* hugepages are never "special" */
+	VM_BUG_ON(pte_special(pte));
+	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+	refs = 0;
+	head = pte_page(pte);
+	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	get_head_page_multiple(head, refs);
+	return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+			int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pud_t *pudp;
+
+	pudp = pud_offset(&pgd, addr);
+	do {
+		pud_t pud = *pudp;
+
+		next = pud_addr_end(addr, end);
+		if (pud_none(pud))
+			return 0;
+		if (unlikely(pud_huge(pud))) {
+			if (!gup_huge_pud(pud, addr, next, write, pages,nr))
+				return 0;
+		} else {
+			if (!gup_pmd_range(pud, addr, next, write, pages,nr))
+				return 0;
+		}
+	} while (pudp++, addr = next, addr != end);
+
+	return 1;
+}
+
+/*
+ * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			  struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	unsigned long flags;
+	pgd_t *pgdp;
+	int nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+					(void __user *)start, len)))
+		return 0;
+
+	/*
+	 * XXX: batch / limit 'nr', to avoid large irq off latency
+	 * needs some instrumenting to determine the common sizes used by
+	 * important workloads (eg. DB2), and whether limiting the batch
+	 * size will decrease performance.
+	 *
+	 * It seems like we're in the clear for the moment. Direct-IO is
+	 * the main guy that batches up lots of get_user_pages, and even
+	 * they are limited to 64-at-a-time which is not so many.
+	 */
+	/*
+	 * This doesn't prevent pagetable teardown, but does prevent
+	 * the pagetables and pages from being freed.
+	 *
+	 * So long as we atomically load page table pointers versus teardown,
+	 * we can follow the address down to the page and take a ref on it.
+	 */
+	local_irq_save(flags);
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			break;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			break;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_restore(flags);
+
+	return nr;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start:	starting user address
+ * @nr_pages:	number of pages from start to pin
+ * @write:	whether pages will be written to
+ * @pages:	array that receives pointers to the pages pinned.
+ *		Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	pgd_t *pgdp;
+	int ret, nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+
+	end = start + len;
+	if (end < start)
+		goto slow_irqon;
+
+	/* XXX: batch / limit 'nr' */
+	local_irq_disable();
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			goto slow;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			goto slow;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_enable();
+
+	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+	return nr;
+slow:
+	local_irq_enable();
+
+slow_irqon:
+	/* Try to get the remaining pages with get_user_pages */
+	start += nr << PAGE_SHIFT;
+	pages += nr;
+
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(current, mm, start,
+				(end - start) >> PAGE_SHIFT,
+				write, 0, pages, NULL);
+	up_read(&mm->mmap_sem);
+
+	/* Have to be a bit careful with return values */
+	if (nr > 0) {
+		if (ret < 0)
+			ret = nr;
+		else
+			ret += nr;
+	}
+	return ret;
+}
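The kernel-doc above gives the calling convention for the new MIPS get_user_pages_fast(). For context, a hypothetical caller in a roughly 3.4-era kernel would pin and release pages along these lines — the function name and error policy here are illustrative, not part of this commit:

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical caller, for illustration: pin the pages backing a
 * user buffer, operate on them, then drop every reference taken. */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int i, pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Tries the lockless fast path; falls back to get_user_pages(). */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;	/* nothing was pinned */
	}

	/* ... DMA to / read from the pinned pages here ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	kfree(pages);
	return pinned == nr_pages ? 0 : -EFAULT;
}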
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 3634c7ea06ac..aff57057a949 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
@@ -62,7 +62,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index b7ebc4fa89bc..1a85ba92eb5c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -207,21 +207,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	vto = kmap_atomic(to, KM_USER1);
+	vto = kmap_atomic(to);
 	if (cpu_has_dc_aliases &&
 	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
 	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
+		vfrom = kmap_atomic(from);
 		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 	}
 	if ((!cpu_has_ic_fills_f_dc) ||
 	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
 		flush_data_cache_page((unsigned long)vto);
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vto);
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
@@ -304,9 +304,14 @@ int page_is_ram(unsigned long pagenr)
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		unsigned long addr, end;
 
-		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			/* not usable memory */
 			continue;
+		}
 
 		addr = PFN_UP(boot_mem_map.map[i].addr);
 		end = PFN_DOWN(boot_mem_map.map[i].addr +
@@ -379,7 +384,7 @@ void __init mem_init(void)
 	reservedpages = ram = 0;
 	for (tmp = 0; tmp < max_low_pfn; tmp++)
-		if (page_is_ram(tmp)) {
+		if (page_is_ram(tmp) && pfn_valid(tmp)) {
 			ram++;
 			if (PageReserved(pfn_to_page(tmp)))
 				reservedpages++;
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 36272f7d3744..cc0b626858b3 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -22,7 +22,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/prefetch.h>
-#include <asm/system.h>
 #include <asm/bootinfo.h>
 #include <asm/mipsregs.h>
 #include <asm/mmu_context.h>
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index a6bd11fba7bf..1eb708ef75ff 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -12,7 +12,6 @@
 #include <asm/bcache.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/bootinfo.h>
 #include <asm/sgi/ip22.h>
 #include <asm/sgi/mc.h>
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 9cca8de00545..93d937b4b1ba 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -11,7 +11,6 @@
 #include <asm/cacheops.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/r4kcache.h>
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index ae1e533a096e..8d90ff25b123 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -12,7 +12,6 @@
 #include <asm/cacheops.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/r4kcache.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 87bb85d8d537..a63d1ed0827f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -19,7 +19,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
-#include <asm/system.h>
+#include <asm/tlbmisc.h>
 #include <asm/isadep.h>
 #include <asm/io.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 0d394e0e8837..d2572cb232db 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -18,7 +18,7 @@
 #include <asm/bootinfo.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
+#include <asm/tlbmisc.h>
 
 extern void build_tlb_refill_handler(void);
@@ -120,22 +120,30 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 	if (cpu_context(cpu, mm) != 0) {
 		unsigned long size, flags;
+		int huge = is_vm_hugetlb_page(vma);
 
 		ENTER_CRITICAL(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		size = (size + 1) >> 1;
+		if (huge) {
+			start = round_down(start, HPAGE_SIZE);
+			end = round_up(end, HPAGE_SIZE);
+			size = (end - start) >> HPAGE_SHIFT;
+		} else {
+			start = round_down(start, PAGE_SIZE << 1);
+			end = round_up(end, PAGE_SIZE << 1);
+			size = (end - start) >> (PAGE_SHIFT + 1);
+		}
 		if (size <= current_cpu_data.tlbsize/2) {
 			int oldpid = read_c0_entryhi();
 			int newpid = cpu_asid(cpu, mm);
 
-			start &= (PAGE_MASK << 1);
-			end += ((PAGE_SIZE << 1) - 1);
-			end &= (PAGE_MASK << 1);
 			while (start < end) {
 				int idx;
 
 				write_c0_entryhi(start | newpid);
-				start += (PAGE_SIZE << 1);
+				if (huge)
+					start += HPAGE_SIZE;
+				else
+					start += (PAGE_SIZE << 1);
 				mtc0_tlbw_hazard();
 				tlb_probe();
 				tlb_probe_hazard();
@@ -368,51 +376,6 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	EXIT_CRITICAL(flags);
 }
 
-/*
- * Used for loading TLB entries before trap_init() has started, when we
- * don't actually want to add a wired entry which remains throughout the
- * lifetime of the system
- */
-
-static int temp_tlb_entry __cpuinitdata;
-
-__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
-			       unsigned long entryhi, unsigned long pagemask)
-{
-	int ret = 0;
-	unsigned long flags;
-	unsigned long wired;
-	unsigned long old_pagemask;
-	unsigned long old_ctx;
-
-	ENTER_CRITICAL(flags);
-	/* Save old context and create impossible VPN2 value */
-	old_ctx = read_c0_entryhi();
-	old_pagemask = read_c0_pagemask();
-	wired = read_c0_wired();
-	if (--temp_tlb_entry < wired) {
-		printk(KERN_WARNING
-		       "No TLB space left for add_temporary_entry\n");
-		ret = -ENOSPC;
-		goto out;
-	}
-
-	write_c0_index(temp_tlb_entry);
-	write_c0_pagemask(pagemask);
-	write_c0_entryhi(entryhi);
-	write_c0_entrylo0(entrylo0);
-	write_c0_entrylo1(entrylo1);
-	mtc0_tlbw_hazard();
-	tlb_write_indexed();
-	tlbw_use_hazard();
-
-	write_c0_entryhi(old_ctx);
-	write_c0_pagemask(old_pagemask);
-out:
-	EXIT_CRITICAL(flags);
-	return ret;
-}
-
 static int __cpuinitdata ntlb;
 static int __init set_ntlb(char *str)
 {
@@ -450,8 +413,6 @@ void __cpuinit tlb_init(void)
 		write_c0_pagegrain(pg);
 	}
 
-	temp_tlb_entry = current_cpu_data.tlbsize - 1;
-
 	/* From this point on the ARC firmware is dead. */
 	local_flush_tlb_all();
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 3d95f76c106b..91c2499f806a 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -17,7 +17,6 @@
 #include <asm/bootinfo.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 
 extern void build_tlb_refill_handler(void);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e06370f58ef3..0bc485b3cd60 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -32,6 +32,7 @@
 #include <asm/pgtable.h>
 #include <asm/war.h>
 #include <asm/uasm.h>
+#include <asm/setup.h>
 
 /*
  * TLB load/store/modify handlers.
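The local_flush_tlb_range() rework in the tlb-r4k.c hunk above sizes the flush in TLB-entry units: each MIPS TLB entry maps an even/odd pair of base pages, hence the PAGE_SIZE << 1 alignment and the (PAGE_SHIFT + 1) shift, while a huge page occupies one entry per HPAGE_SIZE. A small standalone illustration of that arithmetic, under assumed constants (4 KiB base pages, 2 MiB huge pages — actual values are configuration dependent):

#include <stdio.h>

/* Illustrative constants only: 4 KiB base pages, 2 MiB huge pages. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define HPAGE_SHIFT	21
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)

/* Power-of-two rounding, matching the kernel helpers used above. */
#define round_down(x, a)	((x) & ~((a) - 1))
#define round_up(x, a)		round_down((x) + (a) - 1, (a))

int main(void)
{
	unsigned long start = 0x10001000UL, end = 0x10007000UL, size;

	/* Base pages: align to an even/odd pair and count pairs. */
	size = (round_up(end, PAGE_SIZE << 1) -
		round_down(start, PAGE_SIZE << 1)) >> (PAGE_SHIFT + 1);
	printf("TLB entries to probe (base pages): %lu\n", size);	/* 4 */

	/* Huge pages: align to HPAGE_SIZE and count whole huge pages. */
	size = (round_up(end, HPAGE_SIZE) -
		round_down(start, HPAGE_SIZE)) >> HPAGE_SHIFT;
	printf("TLB entries to probe (huge pages): %lu\n", size);	/* 1 */

	return 0;
}

If the resulting size exceeds half the TLB, the code falls back to local_flush_tlb_all() rather than probing entry by entry.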