Diffstat (limited to 'arch/parisc/include/asm/pgtable.h')
-rw-r--r-- | arch/parisc/include/asm/pgtable.h | 69 |
1 files changed, 54 insertions, 15 deletions
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index c7bb74e22436..a39b079e73f2 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -17,7 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-extern spinlock_t pa_tlb_lock;
+static inline spinlock_t *pgd_spinlock(pgd_t *);
 
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -34,16 +34,46 @@ extern spinlock_t pa_tlb_lock;
  */
 #define kern_addr_valid(addr)	(1)
 
-/* Purge data and instruction TLB entries. Must be called holding
- * the pa_tlb_lock. The TLB purge instructions are slow on SMP
- * machines since the purge must be broadcast to all CPUs.
+/* This is for the serialization of PxTLB broadcasts. At least on the N class
+ * systems, only one PxTLB inter processor broadcast can be active at any one
+ * time on the Merced bus.
+
+ * PTE updates are protected by locks in the PMD.
+ */
+extern spinlock_t pa_tlb_flush_lock;
+extern spinlock_t pa_swapper_pg_lock;
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+extern int pa_serialize_tlb_flushes;
+#else
+#define pa_serialize_tlb_flushes	(0)
+#endif
+
+#define purge_tlb_start(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_save(flags);	\
+	} while (0)
+#define purge_tlb_end(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_restore(flags);	\
+	} while (0)
+
+/* Purge data and instruction TLB entries. The TLB purge instructions
+ * are slow on SMP machines since the purge must be broadcast to all CPUs.
  */
 
 static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
+	unsigned long flags;
+
+	purge_tlb_start(flags);
 	mtsp(mm->context, 1);
 	pdtlb(addr);
 	pitlb(addr);
+	purge_tlb_end(flags);
 }
 
 /* Certain architectures need to do special things when PTEs
@@ -59,11 +89,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	do {					\
 		pte_t old_pte;			\
 		unsigned long flags;		\
-		spin_lock_irqsave(&pa_tlb_lock, flags);	\
+		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
 		old_pte = *ptep;		\
 		set_pte(ptep, pteval);		\
 		purge_tlb_entries(mm, addr);	\
-		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
+		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
 	} while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -88,10 +118,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #if CONFIG_PGTABLE_LEVELS == 3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
+#define PGD_ALLOC_ORDER	(2 + 1) /* first pgd contains pmd */
 #else
 #define PGD_ORDER	1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER	PGD_ORDER
+#define PGD_ALLOC_ORDER	(PGD_ORDER + 1)
 #endif
 
 /* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -459,6 +489,15 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
+
+static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
+{
+	if (unlikely(pgd == swapper_pg_dir))
+		return &pa_swapper_pg_lock;
+	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
+}
+
+
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
@@ -467,15 +506,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	if (!pte_young(*ptep))
 		return 0;
 
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
 	pte = *ptep;
 	if (!pte_young(pte)) {
-		spin_unlock_irqrestore(&pa_tlb_lock, flags);
+		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 		return 0;
 	}
 	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 	return 1;
 }
 
@@ -485,11 +524,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	pte_t old_pte;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	old_pte = *ptep;
 	set_pte(ptep, __pte(0));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
 
 	return old_pte;
 }
@@ -497,10 +536,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
 }
 
 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
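
Note on the new layout: pgd_spinlock() locates a per-pagetable lock inside the pgd allocation itself, at offset PAGE_SIZE << (PGD_ALLOC_ORDER - 1), which is why PGD_ALLOC_ORDER grows by one in the hunk above; swapper_pg_dir, which is not allocated that way, falls back to the global pa_swapper_pg_lock. The sketch below only illustrates how an allocator consistent with this header could set that lock up: the real pgd_alloc() change lives in arch/parisc/include/asm/pgalloc.h, is not part of this diff, and additionally has to handle the PGTABLE_LEVELS == 3 case where the returned pgd is offset into the allocation.

/* Illustrative sketch only (not from this patch): allocate PGD_ALLOC_ORDER
 * pages and initialize the spinlock at the spot pgd_spinlock() will look,
 * i.e. PAGE_SIZE << (PGD_ALLOC_ORDER - 1) bytes into the allocation.
 * Assumes <linux/gfp.h>, <linux/string.h> and <linux/spinlock.h>.
 */
static inline pgd_t *pgd_alloc_sketch(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);

	if (!pgd)
		return NULL;
	memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
	spin_lock_init(pgd_spinlock(pgd));	/* lock sits in the extra page(s) */
	return pgd;
}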