author    | David Gibson <david@gibson.dropbear.id.au> | 2009-10-28 17:27:18 +0100
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-10-30 07:20:57 +0100
commit    | a0668cdc154e54bf0c85182e0535eea237d53146 (patch)
tree      | 84efcadf011e16c240ac9b1c948141fc1cc7d324 /arch/powerpc/include/asm/pgalloc.h
parent    | powerpc/mm: Make hpte_need_flush() correctly mask for multiple page sizes (diff)
powerpc/mm: Cleanup management of kmem_caches for pagetables
Currently we have a fair bit of rather fiddly code to manage the
various kmem_caches used to store page tables of various levels. We
generally have two caches holding some combination of PGD, PUD and PMD
tables, plus several more for the special hugepage pagetables.
This patch cleans this all up by taking a different approach. Rather
than the caches being designated as for PUDs or for hugeptes for 16M
pages, the caches are simply allocated to be a specific size. Thus
sharing of caches between different types/levels of pagetables happens
naturally. The pagetable size, where needed, is passed around encoded
in the same way as {PGD,PUD,PMD}_INDEX_SIZE; that is, as n, where the
pagetable contains 2^n pointers.
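As a rough illustration of the size-indexed approach described above, the caches can be kept in an array keyed by the index size n. This is a hedged sketch only, not the patch text: the names pgtable_cache[], PGT_CACHE(), pgtable_cache_add() and MAX_PGTABLE_INDEX_SIZE are used here purely for illustration.

#include <linux/kernel.h>
#include <linux/slab.h>

#define MAX_PGTABLE_INDEX_SIZE	0xf

static struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
#define PGT_CACHE(shift)	(pgtable_cache[(shift) - 1])

/* Register (or reuse) a kmem_cache for tables holding 2^shift pointers. */
void pgtable_cache_add(unsigned int shift)
{
	/* a table of 2^shift pointers occupies sizeof(void *) << shift bytes */
	unsigned long table_size = sizeof(void *) << shift;
	char *name;

	BUG_ON(shift < 1 || shift > MAX_PGTABLE_INDEX_SIZE);
	if (PGT_CACHE(shift))
		return;		/* a cache of this size already exists: share it */

	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	/* align to table_size so allocations stay naturally aligned */
	pgtable_cache[shift - 1] = kmem_cache_create(name, table_size,
						     table_size, 0, NULL);
}

Because the array is keyed only by size, a PUD table and a 16M hugepte table that happen to need the same number of pointers would land in the same cache, which is the sharing the commit message refers to.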
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include/asm/pgalloc.h')
-rw-r--r-- | arch/powerpc/include/asm/pgalloc.h | 30
1 file changed, 4 insertions, 26 deletions
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index f2e812de7c3c..abe8532bd14e 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -24,25 +24,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	__free_page(ptepage);
 }
 
-typedef struct pgtable_free {
-	unsigned long val;
-} pgtable_free_t;
-
-/* This needs to be big enough to allow for MMU_PAGE_COUNT + 2 to be stored
- * and small enough to fit in the low bits of any naturally aligned page
- * table cache entry. Arbitrarily set to 0x1f, that should give us some
- * room to grow
- */
-#define PGF_CACHENUM_MASK	0x1f
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
-						unsigned long mask)
-{
-	BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
-	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
 #ifdef CONFIG_PPC64
 #include <asm/pgalloc-64.h>
 #else
@@ -50,12 +31,12 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 #endif
 
 #ifdef CONFIG_SMP
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
 extern void pte_free_finish(void);
 #else /* CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
-	pgtable_free(pgf);
+	pgtable_free(table, shift);
 }
 static inline void pte_free_finish(void) { }
 #endif /* !CONFIG_SMP */
@@ -63,12 +44,9 @@ static inline void pte_free_finish(void) { }
 static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
 				  unsigned long address)
 {
-	pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
-						PTE_NONCACHE_NUM,
-						PTE_TABLE_SIZE-1);
 	tlb_flush_pgtable(tlb, address);
 	pgtable_page_dtor(ptepage);
-	pgtable_free_tlb(tlb, page_address(ptepage), 0);
+	pgtable_free_tlb(tlb, page_address(ptepage), 0);
 }
 
 #endif /* __KERNEL__ */
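For context on the new calling convention: a caller frees a table by passing the table pointer plus its index size, with 0 meaning an ordinary PTE page (as in __pte_free_tlb() above) and any other value naming the 2^shift-pointer cache it came from. The __pmd_free_tlb() form below is an illustrative sketch built from the pgtable_free_tlb() declaration in this diff, not a quotation of the patch.

/* Higher-level tables pass their index size; PTE pages pass shift 0. */
#define __pmd_free_tlb(tlb, pmd, addr)	\
	pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)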