author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-02-05 07:29:14 +0100
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 18:44:18 +0100
commit		5e5419734c8719cbc01af959ad9c0844002c0df5 (patch)
tree		a075dca3f719946689efa0245464855cbf2a20ce /arch
parent		Page allocator: clean up pcp draining functions (diff)
add mm argument to pte/pmd/pud/pgd_free
(with Martin Schwidefsky <schwidefsky@de.ibm.com>)
The pgd/pud/pmd/pte page table allocation functions take a mm_struct pointer as
their first argument, but the free functions do not. This is 1) asymmetrical
and 2) the mm argument is needed in the free functions as well in order to do
mm-related page table allocations.
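In short, the free half of the pgalloc interface gains the same mm parameter
that the alloc half already has. The following is a condensed, illustrative
sketch of the change — a summary in the spirit of the hunks below, not a
verbatim excerpt from any single header; call sites mirror the arm hunks,
where kernel page tables pass &init_mm and process page tables forward their
own mm:

	/* before: allocation takes the mm, freeing does not */
	pgd_t *pgd_alloc(struct mm_struct *mm);
	void pgd_free(pgd_t *pgd);
	void pte_free_kernel(pte_t *pte);

	/* after: freeing takes the mm as its first argument too */
	pgd_t *pgd_alloc(struct mm_struct *mm);
	void pgd_free(struct mm_struct *mm, pgd_t *pgd);
	void pte_free_kernel(struct mm_struct *mm, pte_t *pte);

	/* call sites on kernel page tables pass &init_mm ... */
	pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
	/* ... while call sites with a process mm simply forward it */
	pmd_free(mm, new_pmd);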
[kamalesh@linux.vnet.ibm.com: i386 fix]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/smp.c		|  2
-rw-r--r--	arch/arm/mm/ioremap.c		|  2
-rw-r--r--	arch/arm/mm/pgd.c		|  8
-rw-r--r--	arch/frv/mm/pgalloc.c		|  2
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	|  6
-rw-r--r--	arch/ppc/mm/pgtable.c		|  6
-rw-r--r--	arch/um/kernel/mem.c		|  2
-rw-r--r--	arch/um/kernel/skas/mmu.c	|  8
-rw-r--r--	arch/x86/mm/pgtable_32.c	| 12
9 files changed, 24 insertions, 24 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e9dfbab46cb6..eefae1de334c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -150,7 +150,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	secondary_data.pgdir = 0;

 	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
-	pgd_free(pgd);
+	pgd_free(&init_mm, pgd);

 	if (ret) {
 		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 75952779ce19..303a7ff6bfd2 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -162,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 			 * Free the page table, if there was one.
 			 */
 			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
-				pte_free_kernel(pmd_page_vaddr(pmd));
+				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
 		}

 		addr += PGDIR_SIZE;
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 50b9aed6000d..500c9610ab30 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -65,14 +65,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	return new_pgd;

no_pte:
-	pmd_free(new_pmd);
+	pmd_free(mm, new_pmd);
no_pmd:
 	free_pages((unsigned long)new_pgd, 2);
no_pgd:
 	return NULL;
 }

-void free_pgd_slow(pgd_t *pgd)
+void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
 {
 	pmd_t *pmd;
 	struct page *pte;
@@ -94,8 +94,8 @@ void free_pgd_slow(pgd_t *pgd)
 	pmd_clear(pmd);
 	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
 	pte_lock_deinit(pte);
-	pte_free(pte);
-	pmd_free(pmd);
+	pte_free(mm, pte);
+	pmd_free(mm, pmd);
free:
 	free_pages((unsigned long) pgd, 2);
 }
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 7787c3cc52c6..1a2e5c8d03a9 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -140,7 +140,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	return pgd;
 }

-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
 	quicklist_free(0, pgd_dtor, pgd);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 64488723162a..f80f90c4d58b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -86,7 +86,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	return ret;
 }

-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	free_pages((unsigned long)pgd, PGDIR_ORDER);
 }
@@ -123,7 +123,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return ptepage;
 }

-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
 #ifdef CONFIG_SMP
 	hash_page_sync();
@@ -131,7 +131,7 @@ void pte_free_kernel(pte_t *pte)
 	free_page((unsigned long)pte);
 }

-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
 {
 #ifdef CONFIG_SMP
 	hash_page_sync();
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index fadacfd18806..409fcaa4994a 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -74,7 +74,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	return ret;
 }

-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	free_pages((unsigned long)pgd, PGDIR_ORDER);
 }
@@ -111,7 +111,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return ptepage;
 }

-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
 #ifdef CONFIG_SMP
 	hash_page_sync();
@@ -119,7 +119,7 @@ void pte_free_kernel(pte_t *pte)
 	free_page((unsigned long)pte);
 }

-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
 {
 #ifdef CONFIG_SMP
 	hash_page_sync();
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 59822dee438a..e3e72418f241 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -348,7 +348,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	return pgd;
 }

-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	free_page((unsigned long) pgd);
 }
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index f859ec306cd5..b56fe8b67a81 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -58,9 +58,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	return 0;

 out_pmd:
-	pud_free(pud);
+	pud_free(mm, pud);
 out_pte:
-	pmd_free(pmd);
+	pmd_free(mm, pmd);
 out:
 	return -ENOMEM;
 }
@@ -144,10 +144,10 @@ void destroy_context(struct mm_struct *mm)
 	if (!proc_mm || !ptrace_faultinfo) {
 		free_page(mmu->id.stack);
 		pte_lock_deinit(virt_to_page(mmu->last_page_table));
-		pte_free_kernel((pte_t *) mmu->last_page_table);
+		pte_free_kernel(mm, (pte_t *) mmu->last_page_table);
 		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
 #ifdef CONFIG_3_LEVEL_PGTABLES
-		pmd_free((pmd_t *) mmu->last_pmd);
+		pmd_free(mm, (pmd_t *) mmu->last_pmd);
 #endif
 	}
 }
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index c7db504be1ea..6c1914622a88 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -272,7 +272,7 @@ static void pgd_dtor(void *pgd)
  * preallocate which never got a corresponding vma will need to be
  * freed manually.
  */
-static void pgd_mop_up_pmds(pgd_t *pgdp)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 {
 	int i;

@@ -285,7 +285,7 @@ static void pgd_mop_up_pmds(pgd_t *pgdp)
 			pgdp[i] = native_make_pgd(0);

 			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
-			pmd_free(pmd);
+			pmd_free(mm, pmd);
 		}
 	}
 }
@@ -313,7 +313,7 @@ static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
 		pmd_t *pmd = pmd_alloc_one(mm, addr);

 		if (!pmd) {
-			pgd_mop_up_pmds(pgd);
+			pgd_mop_up_pmds(mm, pgd);
 			return 0;
 		}

@@ -333,7 +333,7 @@ static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
 	return 1;
 }

-static void pgd_mop_up_pmds(pgd_t *pgd)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 {
 }
 #endif /* CONFIG_X86_PAE */
@@ -352,9 +352,9 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	return pgd;
 }

-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	pgd_mop_up_pmds(pgd);
+	pgd_mop_up_pmds(mm, pgd);
 	quicklist_free(0, pgd_dtor, pgd);
 }