Diffstat (limited to 'arch/powerpc/include/asm/book3s')
-rw-r--r-- | arch/powerpc/include/asm/book3s/32/pgtable.h | 78 |
1 file changed, 31 insertions, 47 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 0d4bccb4b9f2..8a091d125f2d 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -218,7 +218,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
  */
 #define pte_clear(mm, addr, ptep) \
-	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -253,84 +253,68 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
  * and the PTE may be either 32 or 64 bit wide. In the later case,
  * when using atomic updates, only the low part of the PTE is
  * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
  */
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
-				       unsigned long clr,
-				       unsigned long set)
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+				     unsigned long clr, unsigned long set, int huge)
 {
-	unsigned long old, tmp;
-
-	__asm__ __volatile__("\
-1:	lwarx	%0,0,%3\n\
-	andc	%1,%0,%4\n\
-	or	%1,%1,%5\n"
-"	stwcx.	%1,0,%3\n\
-	bne-	1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" (clr), "r" (set), "m" (*p)
-	: "cc" );
-
-	return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
-					    unsigned long clr,
-					    unsigned long set)
-{
-	unsigned long long old;
+	pte_basic_t old;
 	unsigned long tmp;
 
-	__asm__ __volatile__("\
-1:	lwarx	%L0,0,%4\n\
-	lwzx	%0,0,%3\n\
-	andc	%1,%L0,%5\n\
-	or	%1,%1,%6\n"
-"	stwcx.	%1,0,%4\n\
-	bne-	1b"
+	__asm__ __volatile__(
+#ifndef CONFIG_PTE_64BIT
+"1:	lwarx	%0, 0, %3\n"
+"	andc	%1, %0, %4\n"
+#else
+"1:	lwarx	%L0, 0, %3\n"
+"	lwz	%0, -4(%3)\n"
+"	andc	%1, %L0, %4\n"
+#endif
+"	or	%1, %1, %5\n"
+"	stwcx.	%1, 0, %3\n"
+"	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+#ifndef CONFIG_PTE_64BIT
+	: "r" (p),
+#else
+	: "b" ((unsigned long)(p) + 4),
+#endif
+	  "r" (clr), "r" (set), "m" (*p)
 	: "cc" );
 
 	return old;
 }
-#endif /* CONFIG_PTE_64BIT */
 
 /*
  * 2.6 calls this without flushing the TLB entry; this is wrong
  * for our hash-based implementation, we fix that up here.
  */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
 {
 	unsigned long old;
-	old = pte_update(ptep, _PAGE_ACCESSED, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 	if (old & _PAGE_HASHPTE) {
 		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
-		flush_hash_pages(context, addr, ptephys, 1);
+		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
 {
-	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
 {
-	pte_update(ptep, _PAGE_RW, 0);
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 }
 
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -341,7 +325,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
-	pte_update(ptep, 0, set);
+	pte_update(vma->vm_mm, address, ptep, 0, set, 0);
 
 	flush_tlb_page(vma, address);
 }
@@ -539,7 +523,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 			      | (pte_val(pte) & ~_PAGE_HASHPTE));
 	else
-		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
 #elif defined(CONFIG_PTE_64BIT)
 	/* Second case is 32-bit with 64-bit PTE.  In this case, we
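For readers following the hunk above: the lwarx/stwcx. loop in the new pte_update() is an atomic read-modify-write of the PTE. It clears the bits passed in clr, sets the bits passed in set, and returns the previous value so that callers such as __ptep_test_and_clear_young() can test bits like _PAGE_ACCESSED or _PAGE_HASHPTE in it; the new mm, addr and huge parameters are not referenced in the body added here. Below is a minimal, non-atomic C model of that computation. It is an illustration only, not the kernel code: the function and variable names are invented for the example, and pte_basic_t is modelled as a 64-bit integer to cover the CONFIG_PTE_64BIT layout.

/*
 * Illustration only, not part of the commit: a plain-C model of the
 * read-modify-write that the lwarx/stwcx. loop performs. The real code
 * updates the low PTE word atomically; all names below are invented.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_basic_t;	/* models the CONFIG_PTE_64BIT case */

static pte_basic_t pte_update_model(pte_basic_t *p, uint32_t clr, uint32_t set)
{
	pte_basic_t old = *p;			/* read the whole PTE */

	/* clear the requested low-word bits, then set the new ones;
	 * the high word is left untouched, as in the asm version */
	*p = (old & ~(pte_basic_t)clr) | set;

	return old;	/* callers test e.g. the accessed bit in the old value */
}

int main(void)
{
	pte_basic_t pte = 0x00000001deadbeefULL;
	pte_basic_t old = pte_update_model(&pte, 0xff, 0x05);

	printf("old=%016llx new=%016llx\n",
	       (unsigned long long)old, (unsigned long long)pte);
	return 0;
}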