From a7b9f671f2d141528491c346e21e8a179cee9d21 Mon Sep 17 00:00:00 2001
From: LEROY Christophe
Date: Mon, 19 Jan 2015 17:04:38 +0100
Subject: powerpc32: adds handling of _PAGE_RO

Some powerpc processors, such as the 8xx, don't have an RW bit in their PTE
bits but an RO (Read Only) bit instead. This patch implements the handling
of a _PAGE_RO flag to be used in place of _PAGE_RW.

Signed-off-by: Christophe Leroy
[scottwood@freescale.com: fix whitespace]
Signed-off-by: Scott Wood
---
 arch/powerpc/include/asm/pgtable-ppc32.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 234e07c47803..62a3e49a9a14 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -275,7 +275,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm,
 				      unsigned long addr, pte_t *ptep)
 {
-	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
 }
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
@@ -286,9 +286,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
-	unsigned long bits = pte_val(entry) &
+	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-	pte_update(ptep, 0, bits);
+	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+	pte_update(ptep, clr, set);
 }
 
 #define __HAVE_ARCH_PTE_SAME
--
cgit v1.2.3
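A note on the ptep_set_wrprotect() change above: pte_update() takes a clear mask and a set mask, so the same call now covers both conventions, clearing _PAGE_RW/_PAGE_HWWRITE where those bits exist and setting _PAGE_RO where they don't (the flag a given platform doesn't implement is presumably defined as 0 elsewhere, which this diff doesn't show). The following is a minimal user-space sketch of that clear/set behaviour, not kernel code; only the _PAGE_RO value is taken from the patches, the other values are placeholders.

#include <assert.h>
#include <stdio.h>

#define _PAGE_RO	0x0400	/* 8xx value quoted in the patches: set => read-only */
#define _PAGE_RW	0x0000	/* this model is an RO-based platform: no RW bit */
#define _PAGE_HWWRITE	0x0000	/* likewise absent in this model */

/* Stand-in for the kernel's pte_update(): clear 'clr', then set 'set'. */
static unsigned long pte_update(unsigned long *p, unsigned long clr,
				unsigned long set)
{
	unsigned long old = *p;

	*p = (old & ~clr) | set;
	return old;
}

/* Mirrors ptep_set_wrprotect() after the patch. */
static void ptep_set_wrprotect(unsigned long *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}

int main(void)
{
	unsigned long pte = 0x0800;	/* a user page, writable (no _PAGE_RO) */

	ptep_set_wrprotect(&pte);
	assert(pte & _PAGE_RO);		/* now marked read-only */
	printf("pte after wrprotect: 0x%04lx\n", pte);
	return 0;
}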
From cadbfd01468663e8b9f9518a58ef67398a20d06b Mon Sep 17 00:00:00 2001
From: LEROY Christophe
Date: Mon, 19 Jan 2015 17:04:44 +0100
Subject: powerpc/8xx: use _PAGE_RO instead of _PAGE_RW

On the powerpc 8xx, the 0x400 bit in TLB entries is set to 1 for read-only
pages and to 0 for RW pages, so we should use _PAGE_RO instead of _PAGE_RW.

Signed-off-by: Christophe Leroy
Signed-off-by: Scott Wood
---
 arch/powerpc/include/asm/pgtable-ppc32.h | 11 +++++------
 arch/powerpc/include/asm/pte-8xx.h       |  9 ++++-----
 arch/powerpc/kernel/head_8xx.S           |  3 ---
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 62a3e49a9a14..9cde3c1522e3 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -178,12 +178,11 @@ static inline unsigned long pte_update(pte_t *p,
 	andc	%1,%0,%5\n\
 	or	%1,%1,%6\n\
 	/* 0x200 == Extended encoding, bit 22 */ \
-	/* Bit 22 has to be 1 if neither _PAGE_USER nor _PAGE_RW are set */ \
-	rlwimi	%1,%1,32-2,0x200\n /* get _PAGE_USER */ \
-	rlwinm	%3,%1,32-1,0x200\n /* get _PAGE_RW */ \
-	or	%1,%3,%1\n\
-	xori	%1,%1,0x200\n"
-"	stwcx.	%1,0,%4\n\
+	/* Bit 22 has to be 1 when _PAGE_USER is unset and _PAGE_RO is set */ \
+	rlwimi	%1,%1,32-1,0x200\n /* get _PAGE_RO */ \
+	rlwinm	%3,%1,32-2,0x200\n /* get _PAGE_USER */ \
+	andc	%1,%1,%3\n\
+	stwcx.	%1,0,%4\n\
 	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2)
 	: "r" (p), "r" (clr), "r" (set), "m" (*p)
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index daa4616e61c4..eb6edb44f140 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -46,9 +46,9 @@
  * require a TLB exception handler change.  It is assumed unused bits
  * are always zero.
  */
-#define _PAGE_RW	0x0400	/* lsb PP bits, inverted in HW */
+#define _PAGE_RO	0x0400	/* lsb PP bits */
 #define _PAGE_USER	0x0800	/* msb PP bits */
-/* set when neither _PAGE_USER nor _PAGE_RW are set */
+/* set when _PAGE_USER is unset and _PAGE_RO is set */
 #define _PAGE_KNLRO	0x0200
 
 #define _PMD_PRESENT	0x0001
@@ -62,9 +62,8 @@
 #define PTE_ATOMIC_UPDATES	1
 
 /* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO		(_PAGE_SHARED | _PAGE_KNLRO)
-#define _PAGE_KERNEL_ROX	(_PAGE_EXEC | _PAGE_KNLRO)
-#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RO		(_PAGE_SHARED | _PAGE_RO | _PAGE_KNLRO)
+#define _PAGE_KERNEL_ROX	(_PAGE_EXEC | _PAGE_RO | _PAGE_KNLRO)
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PTE_8xx_H */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index d99aac0d69f1..65492d043964 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -441,9 +441,6 @@ DataStoreTLBMiss:
 	and	r11, r11, r10
 	rlwimi	r10, r11, 0, _PAGE_PRESENT
 #endif
-	/* invert RW */
-	xori	r10, r10, _PAGE_RW
-
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 22 and 28 must be clear.
 	 * Software indicator bits 24, 25, 26, and 27 must be
--
cgit v1.2.3
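The rewritten inline assembly above derives bit 22 (0x200, _PAGE_KNLRO) from the other flags: rlwimi rotates the PTE right by one and inserts under the 0x200 mask, copying _PAGE_RO (0x400) into the 0x200 position; rlwinm extracts _PAGE_USER (0x800) at that same position; andc then clears the bit again for user pages. The result is that 0x200 ends up set exactly when _PAGE_RO is set and _PAGE_USER is not, as the new comment states. Below is a small user-space model of that sequence, using only the flag values quoted in the patch (fixup_knlro() is a name made up for the illustration):

#include <assert.h>

#define _PAGE_KNLRO	0x0200
#define _PAGE_RO	0x0400
#define _PAGE_USER	0x0800

/* Hypothetical helper modelling the rlwimi/rlwinm/andc sequence above. */
static unsigned long fixup_knlro(unsigned long pte)
{
	unsigned long user;

	/* rlwimi %1,%1,32-1,0x200: rotate right by 1 and insert under the
	 * 0x200 mask, i.e. copy _PAGE_RO (0x400) into the _PAGE_KNLRO slot. */
	pte = (pte & ~(unsigned long)_PAGE_KNLRO) | ((pte >> 1) & _PAGE_KNLRO);
	/* rlwinm %3,%1,32-2,0x200: extract _PAGE_USER (0x800) at bit 0x200. */
	user = (pte >> 2) & _PAGE_KNLRO;
	/* andc %1,%1,%3: clear the bit again when it is a user page. */
	return pte & ~user;
}

int main(void)
{
	assert(fixup_knlro(_PAGE_RO) & _PAGE_KNLRO);			/* kernel, RO */
	assert(!(fixup_knlro(_PAGE_RO | _PAGE_USER) & _PAGE_KNLRO));	/* user, RO */
	assert(!(fixup_knlro(_PAGE_USER) & _PAGE_KNLRO));		/* user, RW */
	return 0;
}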
From ce67f5d0a00cce231e62334c3624737623c32d6a Mon Sep 17 00:00:00 2001
From: LEROY Christophe
Date: Tue, 20 Jan 2015 10:57:34 +0100
Subject: powerpc32: Use kmem_cache memory for PGDIR

When pages are not 4K, the PGDIR table is allocated with kmalloc(). In order
to optimise the TLB handlers, aligned memory is needed. kmalloc() doesn't
provide aligned memory blocks, so let's use a kmem_cache pool instead.

Signed-off-by: Christophe Leroy
Signed-off-by: Scott Wood
---
 arch/powerpc/include/asm/pgtable-ppc32.h |  4 ++++
 arch/powerpc/mm/pgtable_32.c             | 16 ++++++++++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 9cde3c1522e3..26ce0ab0a9e4 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -347,10 +347,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
 #define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
 
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
 /*
  * No page table caches to initialise
  */
 #define pgtable_cache_init()	do { } while (0)
+#endif
 
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 833139620431..03b1a3b0fbd5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -72,13 +72,25 @@ extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
 
 #define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)
 
+#ifndef CONFIG_PPC_4K_PAGES
+static struct kmem_cache *pgtable_cache;
+
+void pgtable_cache_init(void)
+{
+	pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
+					  1 << PGDIR_ORDER, 0, NULL);
+	if (pgtable_cache == NULL)
+		panic("Couldn't allocate pgtable caches");
+}
+#endif
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *ret;
 
 	/* pgdir take page or two with 4K pages and a page fraction otherwise */
 #ifndef CONFIG_PPC_4K_PAGES
-	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+	ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
 #else
 	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 		PGDIR_ORDER - PAGE_SHIFT);
@@ -89,7 +101,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 #ifndef CONFIG_PPC_4K_PAGES
-	kfree((void *)pgd);
+	kmem_cache_free(pgtable_cache, (void *)pgd);
 #else
 	free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
 #endif
--
cgit v1.2.3
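Background on the allocation change above: kmem_cache_create(name, size, align, flags, ctor) takes the requested alignment as its third argument, so creating the cache with both size and align equal to 1 << PGDIR_ORDER returns PGD tables naturally aligned to their own size, a guarantee that kmalloc()/kzalloc() does not give. The sketch below is a user-space analogue of that property, with aligned_alloc() standing in for the cache and a made-up PGDIR_ORDER_DEMO; it is illustrative only, not the kernel implementation.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PGDIR_ORDER_DEMO	14			/* made-up: pretend a PGD spans 16KB */
#define PGDIR_SIZE_DEMO		(1UL << PGDIR_ORDER_DEMO)

/* Rough analogue of allocating from a kmem_cache created with align == size. */
static void *pgd_alloc_demo(void)
{
	void *pgd = aligned_alloc(PGDIR_SIZE_DEMO, PGDIR_SIZE_DEMO);

	if (pgd)
		memset(pgd, 0, PGDIR_SIZE_DEMO);	/* the __GFP_ZERO part */
	return pgd;
}

int main(void)
{
	void *pgd = pgd_alloc_demo();

	assert(pgd != NULL);
	/* Size-aligned: the low PGDIR_ORDER_DEMO address bits are zero, so a
	 * TLB miss handler could form an entry pointer by inserting index
	 * bits into the base address rather than doing a full add. */
	assert(((uintptr_t)pgd & (PGDIR_SIZE_DEMO - 1)) == 0);
	free(pgd);
	return 0;
}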