Diffstat (limited to 'include/asm-generic/pgtable.h')
-rw-r--r--  include/asm-generic/pgtable.h | 33
1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index db7df7daa0d8..7056a25479d6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -188,6 +188,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 }
 #endif
 
+
+/*
+ * If two threads fault at the same page concurrently, the thread that
+ * won the race updates the PTE and its local TLB/cache. The other thread
+ * gives up and simply continues; on architectures where software can
+ * update the TLB, the local TLB can be updated here to avoid the next
+ * page fault. This function updates the TLB only and touches nothing
+ * else, which is what distinguishes it from update_mmu_cache.
+ */
+#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+				unsigned long address, pte_t *ptep)
+{
+}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#endif
+
 /*
  * Some architectures may be able to avoid expensive synchronization
  * primitives when modifications are made to PTE's which are already
@@ -227,6 +244,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 }
 #endif
 
+/*
+ * On some architectures the hardware does not set the page access bit when
+ * a page is accessed; it is the responsibility of software to set it, which
+ * costs an extra page fault to track. As an optimization, the access bit can
+ * be set during the page fault path on these architectures. Unlike the
+ * generic pte_mkyoung, this macro is only overridden on platforms where
+ * software maintains the page access bit.
+ */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+	return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
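
For context, update_mmu_tlb() is intended to be called from the generic fault
paths at the point where a thread discovers it lost the race to populate a PTE.
The sketch below only illustrates that call pattern under the usual struct
vm_fault conventions; it is not the accompanying mm/memory.c change, and
example_finish_fault() is a made-up name.

#include <linux/mm.h>

/*
 * Illustrative sketch only: how a fault handler could use update_mmu_tlb()
 * after losing the race to install a PTE. example_finish_fault() is a
 * hypothetical helper; the real call sites live in mm/memory.c.
 */
static void example_finish_fault(struct vm_fault *vmf, pte_t entry)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (!pte_none(*vmf->pte)) {
		/*
		 * Another thread installed a PTE first: refresh the local
		 * TLB (a no-op with the generic fallback above) and back
		 * out instead of overwriting the winner's entry.
		 */
		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
		goto unlock;
	}
	/* We won the race: install the entry and update TLB/cache state. */
	set_pte_at(mm, vmf->address, vmf->pte, entry);
	update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
unlock:
	pte_unmap_unlock(vmf->pte, vmf->ptl);
}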
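
Architectures that manage the TLB and the page access bit in software are
expected to provide their own definitions before these generic fallbacks are
seen, typically from their asm/pgtable.h. The following is a hypothetical
override, not any particular architecture's code; local_flush_tlb_page() is
assumed to exist with the usual (vma, address) signature, and mapping
pte_sw_mkyoung to pte_mkyoung is just one plausible choice.

/* Hypothetical <asm/pgtable.h> excerpt, shown for illustration only. */

#define __HAVE_ARCH_UPDATE_MMU_TLB
static inline void update_mmu_tlb(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
	/* Drop any stale local translation so the next access sees *ptep. */
	local_flush_tlb_page(vma, address);
}

/* Set the access bit at fault time so the first access does not fault again. */
#define pte_sw_mkyoung	pte_mkyoung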