author     Christophe Leroy <christophe.leroy@c-s.fr>    2018-11-29 15:07:01 +0100
committer  Michael Ellerman <mpe@ellerman.id.au>         2018-12-04 09:45:01 +0100
commit     32ea4c14999006fea541b5f78d008fffc1656849 (patch)
tree       5f3a1af0e2a9ec621d1bfebd7e8eb51446810d07 /arch/powerpc/include/asm/nohash
parent     powerpc/mm: add helpers to get/set mm.context->pte_frag (diff)
powerpc/mm: Extend pte_fragment functionality to PPC32
In order to allow the 8xx to handle pte_fragments, this patch
extends the use of pte_fragments to PPC32 platforms.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
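To make the intent of the masking changes in pgtable.h below easier to follow, here is a small stand-alone C sketch (editorial illustration, not kernel code; the sizes are hypothetical example values, not the 8xx's real geometry). Once several PTE tables share one page, the PMD entry points at a sub-page fragment, so recovering the table address has to mask with the fragment size rather than with PAGE_MASK:

/*
 * Illustrative user-space sketch (NOT kernel code): shows why the patch
 * masks a PMD value with ~(PTE_TABLE_SIZE - 1) instead of PAGE_MASK once
 * several PTE fragments share a single page.  The constants below are
 * hypothetical example values.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE      4096u   /* hypothetical page size     */
#define EXAMPLE_PTE_TABLE_SIZE 1024u   /* hypothetical fragment size */

int main(void)
{
	/* Physical address of a fragment: the third 1KB slot of a page. */
	uint32_t frag_pa = 0x01234000u + 2 * EXAMPLE_PTE_TABLE_SIZE;

	/* Masking with PAGE_MASK would lose the fragment offset... */
	uint32_t with_page_mask  = frag_pa & ~(EXAMPLE_PAGE_SIZE - 1);
	/* ...masking with the fragment size keeps it. */
	uint32_t with_table_mask = frag_pa & ~(EXAMPLE_PTE_TABLE_SIZE - 1);

	printf("fragment pa        : 0x%08x\n", frag_pa);
	printf("PAGE_MASK mask     : 0x%08x (offset lost)\n", with_page_mask);
	printf("PTE_TABLE_SIZE mask: 0x%08x (offset kept)\n", with_table_mask);
	return 0;
}

Built with any C99/C11 compiler, this prints the three addresses: the PAGE_MASK variant drops the 0x800 fragment offset, while masking with the fragment size preserves it, which is exactly what the new pmd_page_vaddr() definitions rely on.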
Diffstat (limited to 'arch/powerpc/include/asm/nohash')
-rw-r--r--  arch/powerpc/include/asm/nohash/32/mmu.h      4
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgalloc.h  22
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h  7
3 files changed, 18 insertions, 15 deletions
diff --git a/arch/powerpc/include/asm/nohash/32/mmu.h b/arch/powerpc/include/asm/nohash/32/mmu.h
index f61f933a4cd8..7d94a36d57d2 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_POWERPC_NOHASH_32_MMU_H_
 #define _ASM_POWERPC_NOHASH_32_MMU_H_
 
+#include <asm/page.h>
+
 #if defined(CONFIG_40x)
 /* 40x-style software loaded TLB */
 #include <asm/nohash/32/mmu-40x.h>
@@ -17,7 +19,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
-typedef struct page *pgtable_t;
+typedef pte_t *pgtable_t;
 #endif
 
 #endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 8825953c225b..7e234582dce5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -61,11 +61,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
-		      _PMD_PRESENT);
+	*pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 #else
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
@@ -77,31 +76,32 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+	*pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 #endif
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+void pte_frag_destroy(void *pte_frag);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 {
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
+	pte_fragment_free((unsigned long *)ptepage, 0);
 }
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
 	if (!index_size) {
-		pgtable_page_dtor(virt_to_page(table));
-		free_page((unsigned long)table);
+		pte_fragment_free((unsigned long *)table, 0);
 	} else {
 		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
 		kmem_cache_free(PGT_CACHE(index_size), table);
@@ -140,6 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
 	tlb_flush_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	pgtable_free_tlb(tlb, table, 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 3ffb0ff5a038..e9c8604cc8c5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -333,12 +333,12 @@ static inline int pte_young(pte_t pte)
  */
 #ifndef CONFIG_BOOKE
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)		\
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)		\
 	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 #endif
@@ -357,7 +357,8 @@ static inline int pte_young(pte_t pte)
 	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
 				  pte_index(addr))
 #define pte_offset_map(dir, addr)		\
-	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
+		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
 #define pte_unmap(pte)		kunmap_atomic(pte)
 
 /*
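For readers unfamiliar with the pte_fragment scheme, the toy user-space allocator below illustrates the general idea behind the pte_fragment_alloc()/pte_fragment_free() prototypes declared in pgalloc.h above: one page-sized allocation is carved into several smaller PTE-table fragments that are handed out individually. This is only a sketch with hypothetical sizes and names; the kernel's real implementation (with per-mm caching, refcounting and locking) lives in the powerpc mm code, not in these headers.

/*
 * Toy user-space illustration of the fragment idea: carve one page-sized
 * allocation into fixed-size PTE-table fragments and hand them out one by
 * one.  This is NOT the kernel's pte_fragment_alloc(); it only mimics the
 * general scheme, with hypothetical sizes and no locking or refcounting.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096u  /* hypothetical page size     */
#define TOY_FRAG_SIZE 1024u  /* hypothetical fragment size */
#define TOY_FRAG_NR   (TOY_PAGE_SIZE / TOY_FRAG_SIZE)

static unsigned char *cur_page;   /* page currently being carved up   */
static unsigned int   cur_frag;   /* next unused fragment in cur_page */

/* Return one fragment; allocate a fresh page when the old one is used up. */
static void *toy_frag_alloc(void)
{
	if (!cur_page || cur_frag == TOY_FRAG_NR) {
		cur_page = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
		if (!cur_page)
			return NULL;
		cur_frag = 0;
	}
	return cur_page + (cur_frag++) * TOY_FRAG_SIZE;
}

int main(void)
{
	/* Six requests span two pages: four fragments each from page one
	 * and two from page two, at sub-page offsets 0x000..0xc00. */
	for (int i = 0; i < 6; i++)
		printf("fragment %d at %p\n", i, toy_frag_alloc());
	return 0;
}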