author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-02-09 18:24:36 +0100 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-02-09 18:24:40 +0100 |
commit | 5a216a20837c5f5fa1ca4b8ae8991ffd96b08e6f | |
tree | dde54e28497e920fa460cc95dadb6b38f1b2dbe0 /include/asm-s390/tlb.h | |
parent | [S390] 1K/2K page table pages. | |
[S390] Add four level page tables for CONFIG_64BIT=y.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
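For readers unfamiliar with the term, a four-level layout splits a 64-bit virtual address into four table indices plus a page offset, so much larger address spaces can be mapped than with three levels. The sketch below is only an illustration of that idea; the shift values, table sizes, and names are assumptions chosen for the demo and do not reflect the s390 region/segment table format used by this patch.

```c
/*
 * Illustrative sketch only: a generic four-level table walk
 * (pgd -> pud -> pmd -> pte).  The shifts and table sizes are
 * assumed demo values, not the s390 layout.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT   12                 /* 4 KiB pages (assumed) */
#define PTRS_PER_TAB 512                /* 9 index bits per level (assumed) */

#define PTE_SHIFT    PAGE_SHIFT         /* bits 12..20 */
#define PMD_SHIFT    (PTE_SHIFT + 9)    /* bits 21..29 */
#define PUD_SHIFT    (PMD_SHIFT + 9)    /* bits 30..38 */
#define PGD_SHIFT    (PUD_SHIFT + 9)    /* bits 39..47 */

static unsigned int tab_index(uint64_t addr, unsigned int shift)
{
	return (addr >> shift) & (PTRS_PER_TAB - 1);
}

int main(void)
{
	uint64_t addr = 0x0000123456789abcULL;

	/* One index per level; the hardware walks them top-down. */
	printf("pgd index:   %u\n", tab_index(addr, PGD_SHIFT));
	printf("pud index:   %u\n", tab_index(addr, PUD_SHIFT));
	printf("pmd index:   %u\n", tab_index(addr, PMD_SHIFT));
	printf("pte index:   %u\n", tab_index(addr, PTE_SHIFT));
	printf("page offset: %llu\n",
	       (unsigned long long)(addr & ((1ULL << PAGE_SHIFT) - 1)));
	return 0;
}
```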
Diffstat (limited to '')
-rw-r--r-- | include/asm-s390/tlb.h | 33 |
1 file changed, 24 insertions, 9 deletions
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h
index ecac75ec6cb0..9b2ddb7aac49 100644
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -38,7 +38,7 @@ struct mmu_gather {
 	struct mm_struct *mm;
 	unsigned int fullmm;
 	unsigned int nr_ptes;
-	unsigned int nr_pmds;
+	unsigned int nr_pxds;
 	void *array[TLB_NR_PTRS];
 };
 
@@ -53,7 +53,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
 	tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
 		(atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
 	tlb->nr_ptes = 0;
-	tlb->nr_pmds = TLB_NR_PTRS;
+	tlb->nr_pxds = TLB_NR_PTRS;
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
 	return tlb;
@@ -62,12 +62,13 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
 static inline void tlb_flush_mmu(struct mmu_gather *tlb,
 				 unsigned long start, unsigned long end)
 {
-	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
+	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
 		__tlb_flush_mm(tlb->mm);
 	while (tlb->nr_ptes > 0)
 		pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
-	while (tlb->nr_pmds < TLB_NR_PTRS)
-		pmd_free(tlb->mm, (pmd_t *) tlb->array[tlb->nr_pmds++]);
+	while (tlb->nr_pxds < TLB_NR_PTRS)
+		/* pgd_free frees the pointer as region or segment table */
+		pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
@@ -99,7 +100,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
 {
 	if (!tlb->fullmm) {
 		tlb->array[tlb->nr_ptes++] = pte;
-		if (tlb->nr_ptes >= tlb->nr_pmds)
+		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
 		pte_free(tlb->mm, pte);
@@ -113,15 +114,29 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 #ifdef __s390x__
 	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pmds] = (struct page *) pmd;
-		if (tlb->nr_ptes >= tlb->nr_pmds)
+		tlb->array[--tlb->nr_pxds] = pmd;
+		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
 		pmd_free(tlb->mm, pmd);
 #endif
 }
 
-#define pud_free_tlb(tlb, pud)			do { } while (0)
+/*
+ * pud_free_tlb frees a pud table and clears the CRSTE for the
+ * region third table entry from the tlb.
+ */
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+{
+#ifdef __s390x__
+	if (!tlb->fullmm) {
+		tlb->array[--tlb->nr_pxds] = pud;
+		if (tlb->nr_ptes >= tlb->nr_pxds)
+			tlb_flush_mmu(tlb, 0, 0);
+	} else
+		pud_free(tlb->mm, pud);
+#endif
+}
 
 #define tlb_start_vma(tlb, vma)			do { } while (0)
 #define tlb_end_vma(tlb, vma)			do { } while (0)
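A note on the batching scheme these hunks touch: a single array serves two queues, with pte pages stored from the front (nr_ptes counts up) and pmd/pud tables from the back (nr_pxds counts down); once the two indices meet, tlb_flush_mmu() flushes the TLB and frees everything queued so far. The user-space sketch below only mimics that two-ended queue to make the index arithmetic visible; the struct, callbacks, and sizes are stand-ins for the demo, not the kernel API.

```c
/*
 * Minimal sketch of the two-ended batching array, assuming
 * stand-in names: pte pages fill slots from the front, pmd/pud
 * tables fill slots from the back, and a flush empties both.
 */
#include <stdio.h>

#define NR_PTRS 8			/* stands in for TLB_NR_PTRS */

struct gather {
	unsigned int nr_ptes;		/* next free slot from the front */
	unsigned int nr_pxds;		/* last used slot from the back */
	void *array[NR_PTRS];
};

static void flush(struct gather *g)
{
	printf("flush: %u pte pages, %u pmd/pud tables\n",
	       g->nr_ptes, NR_PTRS - g->nr_pxds);
	g->nr_ptes = 0;			/* front queue empty again */
	g->nr_pxds = NR_PTRS;		/* back queue empty again */
}

static void queue_pte(struct gather *g, void *pte)
{
	g->array[g->nr_ptes++] = pte;
	if (g->nr_ptes >= g->nr_pxds)	/* the two ends met */
		flush(g);
}

static void queue_pxd(struct gather *g, void *pxd)
{
	g->array[--g->nr_pxds] = pxd;
	if (g->nr_ptes >= g->nr_pxds)
		flush(g);
}

int main(void)
{
	struct gather g = { .nr_ptes = 0, .nr_pxds = NR_PTRS };
	int dummy[12];

	for (int i = 0; i < 10; i++) {
		queue_pte(&g, &dummy[i]);	/* fills from the front */
		if (i % 3 == 0)
			queue_pxd(&g, &dummy[i]);	/* fills from the back */
	}
	flush(&g);			/* flush whatever is left over */
	return 0;
}
```

Keeping both kinds of page-table pages in one array is what motivates the rename from nr_pmds to nr_pxds: the back of the array now holds pmd and pud tables alike, and the patch's comment notes that pgd_free() frees the pointer as either a region or a segment table.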